element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---
file
|
openshift/openshift-tests-private
|
5da5beec-ad8b-47d3-949d-8cdbac3719e9
|
cloud_util
|
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"math/rand"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/tidwall/gjson"
"golang.org/x/crypto/ssh"
"net"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
"github.com/vmware/govmomi"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
package networking
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"math/rand"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/tidwall/gjson"
"golang.org/x/crypto/ssh"
"net"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
"github.com/vmware/govmomi"
)
type tcpdumpDaemonSet struct {
name string
namespace string
nodeLabel string
labelKey string
phyInterface string
dstPort int
dstHost string
template string
}
type ibmPowerVsInstance struct {
instance
ibmRegion string
ibmVpcName string
clientPowerVs *exutil.IBMPowerVsSession
}
func (ds *tcpdumpDaemonSet) createTcpdumpDS(oc *exutil.CLI) error {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ds.template, "-p", "NAME="+ds.name, "NAMESPACE="+ds.namespace, "NODELABEL="+ds.nodeLabel, "LABELKEY="+ds.labelKey, "INF="+ds.phyInterface, "DSTPORT="+strconv.Itoa(ds.dstPort), "HOST="+ds.dstHost)
if err1 != nil {
e2e.Logf("Tcpdump daemonset created failed :%v, and try next round", err1)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("fail to create Tcpdump daemonset %v", ds.name)
}
return nil
}
func deleteTcpdumpDS(oc *exutil.CLI, dsName, dsNS string) {
_, err := runOcWithRetry(oc.AsAdmin(), "delete", "ds", dsName, "-n", dsNS, "--ignore-not-found=true")
o.Expect(err).NotTo(o.HaveOccurred())
}
// Get AWS credential from cluster
func getAwsCredentialFromCluster(oc *exutil.CLI) error {
if exutil.CheckPlatform(oc) != "aws" {
g.Skip("it is not aws platform and can not get credential, and then skip it.")
}
credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n", "kube-system", "-o", "json").Output()
// Skip for sts and c2s clusters.
if err != nil {
e2e.Logf("Cannot get AWS basic auth credential,%v", err)
return err
}
accessKeyIDBase64, secureKeyBase64 := gjson.Get(credential, `data.aws_access_key_id`).String(), gjson.Get(credential, `data.aws_secret_access_key`).String()
accessKeyID, err1 := base64.StdEncoding.DecodeString(accessKeyIDBase64)
o.Expect(err1).NotTo(o.HaveOccurred())
secureKey, err2 := base64.StdEncoding.DecodeString(secureKeyBase64)
o.Expect(err2).NotTo(o.HaveOccurred())
clusterRegion, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err3).NotTo(o.HaveOccurred())
os.Setenv("AWS_ACCESS_KEY_ID", string(accessKeyID))
os.Setenv("AWS_SECRET_ACCESS_KEY", string(secureKey))
os.Setenv("AWS_REGION", clusterRegion)
return nil
}
// Get AWS int svc instance ID
func getAwsIntSvcInstanceID(a *exutil.AwsClient, oc *exutil.CLI) (string, error) {
clusterPrefixName := exutil.GetClusterPrefixName(oc)
instanceName := clusterPrefixName + "-int-svc"
instanceID, err := a.GetAwsInstanceID(instanceName)
if err != nil {
e2e.Logf("Get bastion instance id failed with error %v .", err)
return "", err
}
return instanceID, nil
}
// Get int svc instance private ip and public ip
func getAwsIntSvcIPs(a *exutil.AwsClient, oc *exutil.CLI) map[string]string {
instanceID, err := getAwsIntSvcInstanceID(a, oc)
o.Expect(err).NotTo(o.HaveOccurred())
ips, err := a.GetAwsIntIPs(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
return ips
}
// Update int svc instance ingress rule to allow destination port
func updateAwsIntSvcSecurityRule(a *exutil.AwsClient, oc *exutil.CLI, dstPort int64) {
instanceID, err := getAwsIntSvcInstanceID(a, oc)
o.Expect(err).NotTo(o.HaveOccurred())
err = a.UpdateAwsIntSecurityRule(instanceID, dstPort)
o.Expect(err).NotTo(o.HaveOccurred())
}
func installIPEchoServiceOnAWS(a *exutil.AwsClient, oc *exutil.CLI) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_AWS_USER")
if user == "" {
user = "core"
}
sshkey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
command := "sudo netstat -ntlp | grep 9095 || sudo podman run --name ipecho -d -p 9095:80 quay.io/openshifttest/ip-echo:1.2.0"
e2e.Logf("Run command", command)
ips := getAwsIntSvcIPs(a, oc)
publicIP, ok := ips["publicIP"]
if !ok {
return "", fmt.Errorf("no public IP found for Int Svc instance")
}
privateIP, ok := ips["privateIP"]
if !ok {
return "", fmt.Errorf("no private IP found for Int Svc instance")
}
sshClient := exutil.SshClient{User: user, Host: publicIP, Port: 22, PrivateKey: sshkey}
err = sshClient.Run(command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, err)
return "", err
}
updateAwsIntSvcSecurityRule(a, oc, 9095)
ipEchoURL := net.JoinHostPort(privateIP, "9095")
return ipEchoURL, nil
}
func getIfaddrFromNode(nodeName string, oc *exutil.CLI) string {
egressIpconfig, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.cloud\\.network\\.openshift\\.io/egress-ipconfig}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The egressipconfig is %v", egressIpconfig)
if len(egressIpconfig) == 0 {
e2e.Logf("The node %s doesn't have egressIP annotation", nodeName)
return ""
}
ifaddr := strings.Split(egressIpconfig, "\"")[9]
e2e.Logf("The subnet of node %s is %v .", nodeName, ifaddr)
return ifaddr
}
func getPrimaryIfaddrFromBMNode(oc *exutil.CLI, nodeName string) (string, string) {
primaryIfaddr, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.k8s\\.ovn\\.org/node-primary-ifaddr}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The primaryIfaddr is %v for node %s", primaryIfaddr, nodeName)
var ipv4Ifaddr, ipv6Ifaddr string
tempSlice := strings.Split(primaryIfaddr, "\"")
ipStackType := checkIPStackType(oc)
switch ipStackType {
case "ipv4single":
o.Expect(len(tempSlice) > 3).Should(o.BeTrue())
ipv4Ifaddr = tempSlice[3]
e2e.Logf("The ipv4 subnet of node %s is %v .", nodeName, ipv4Ifaddr)
case "dualstack":
o.Expect(len(tempSlice) > 7).Should(o.BeTrue())
ipv4Ifaddr = tempSlice[3]
ipv6Ifaddr = tempSlice[7]
e2e.Logf("The ipv4 subnet of node %s is %v, ipv6 subnet is :%v", nodeName, ipv4Ifaddr, ipv6Ifaddr)
case "ipv6single":
o.Expect(len(tempSlice) > 3).Should(o.BeTrue())
ipv6Ifaddr = tempSlice[3]
e2e.Logf("The ipv6 subnet of node %s is %v .", nodeName, ipv6Ifaddr)
default:
e2e.Logf("Get ipStackType as %s", ipStackType)
g.Skip("Skip for not supported IP stack type!! ")
}
return ipv4Ifaddr, ipv6Ifaddr
}
func findUnUsedIPsOnNode(oc *exutil.CLI, nodeName, cidr string, number int) []string {
ipRange, _ := Hosts(cidr)
var ipUnused = []string{}
//shuffle the ips slice
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(ipRange), func(i, j int) { ipRange[i], ipRange[j] = ipRange[j], ipRange[i] })
var err error
var podName string
var ns string
podName, err = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
ns = "openshift-ovn-kubernetes"
for _, ip := range ipRange {
if len(ipUnused) < number {
pingCmd := "ping -c4 -t1 " + ip
msg, err := exutil.RemoteShPodWithBash(oc, ns, podName, pingCmd)
if err != nil && (strings.Contains(msg, "Destination Host Unreachable") || strings.Contains(msg, "100% packet loss")) {
e2e.Logf("%s is not used!\n", ip)
ipUnused = append(ipUnused, ip)
} else if err != nil {
break
}
} else {
break
}
}
return ipUnused
}
func findFreeIPs(oc *exutil.CLI, nodeName string, number int) []string {
var freeIPs []string
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "vsphere") {
sub1, err := getDefaultSubnet(oc)
o.Expect(err).NotTo(o.HaveOccurred())
freeIPs = findUnUsedIPs(oc, sub1, number)
} else if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") || strings.Contains(platform, "nutanix") || strings.Contains(platform, "kubevirt") || strings.Contains(platform, "powervs") {
ipv4Sub, _ := getPrimaryIfaddrFromBMNode(oc, nodeName)
tempSlice := strings.Split(ipv4Sub, "/")
o.Expect(len(tempSlice) > 1).Should(o.BeTrue())
preFix, err := strconv.Atoi(tempSlice[1])
o.Expect(err).NotTo(o.HaveOccurred())
if preFix > 29 {
g.Skip("There might be no enough free IPs in current subnet, skip the test!!")
}
freeIPs = findUnUsedIPsOnNode(oc, nodeName, ipv4Sub, number)
} else {
sub1 := getIfaddrFromNode(nodeName, oc)
if len(sub1) == 0 && strings.Contains(platform, "gcp") {
g.Skip("Skip the tests as no egressIP annoatation on this platform nodes!!")
}
o.Expect(len(sub1) == 0).NotTo(o.BeTrue())
freeIPs = findUnUsedIPsOnNode(oc, nodeName, sub1, number)
}
return freeIPs
}
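// Illustrative usage (a sketch with hypothetical variable names, not part of
// the original file): reserve two unused IPs on the egress node's subnet
// before creating an EgressIP object.
//   freeIPs := findFreeIPs(oc, egressNodeName, 2)
//   o.Expect(len(freeIPs)).Should(o.Equal(2))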
func findFreeIPsForCIDRs(oc *exutil.CLI, nodeName, cidr string, number int) []string {
var freeIPs []string
freeIPs = findUnUsedIPsOnNode(oc, nodeName, cidr, number)
o.Expect(len(freeIPs)).Should(o.Equal(number))
return freeIPs
}
func findFreeIPv6s(oc *exutil.CLI, nodeName string, number int) []string {
var freeIPs []string
_, ipv6Sub := getPrimaryIfaddrFromBMNode(oc, nodeName)
tempSlice := strings.Split(ipv6Sub, "/")
o.Expect(len(tempSlice) > 1).Should(o.BeTrue())
preFix, err := strconv.Atoi(tempSlice[1])
o.Expect(err).NotTo(o.HaveOccurred())
if preFix > 126 {
g.Skip("There might be no enough free IPs in current subnet, skip the test!!")
}
freeIPs, err = findUnUsedIPv6(oc, ipv6Sub, number)
o.Expect(err).NotTo(o.HaveOccurred())
return freeIPs
}
func execCommandInOVNPodOnNode(oc *exutil.CLI, nodeName, command string) (string, error) {
ovnPodName, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
msg, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPodName, command)
if err != nil {
e2e.Logf("Execute ovn command failed with err:%v .", err)
return msg, err
}
return msg, nil
}
func execCommandInSDNPodOnNode(oc *exutil.CLI, nodeName, command string) (string, error) {
sdnPodName, err := exutil.GetPodName(oc, "openshift-sdn", "app=sdn", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
msg, err := exutil.RemoteShPodWithBash(oc, "openshift-sdn", sdnPodName, command)
if err != nil {
e2e.Logf("Execute sdn command failed with err:%v .", err)
return msg, err
}
return msg, nil
}
func getgcloudClient(oc *exutil.CLI) *exutil.Gcloud {
if exutil.CheckPlatform(oc) != "gcp" {
g.Skip("it is not gcp platform!")
}
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if projectID != "openshift-qe" {
g.Skip("openshift-qe project is needed to execute this test case!")
}
gcloud := exutil.Gcloud{ProjectID: projectID}
return gcloud.Login()
}
func getIntSvcExternalIPFromGcp(oc *exutil.CLI, infraID string) (string, error) {
externalIP, err := getgcloudClient(oc).GetIntSvcExternalIP(infraID)
e2e.Logf("Additional VM external ip: %s", externalIP)
return externalIP, err
}
func installIPEchoServiceOnGCP(oc *exutil.CLI, infraID string, host string) (string, error) {
e2e.Logf("Infra id: %s, install ipecho service on host %s", infraID, host)
// Run ip-echo service on the additional VM
serviceName := "ip-echo"
internalIP, err := getgcloudClient(oc).GetIntSvcInternalIP(infraID)
o.Expect(err).NotTo(o.HaveOccurred())
port := "9095"
runIPEcho := fmt.Sprintf("sudo netstat -ntlp | grep %s || sudo podman run --name %s -d -p %s:80 quay.io/openshifttest/ip-echo:1.2.0", port, serviceName, port)
user := os.Getenv("SSH_CLOUD_PRIV_GCP_USER")
if user == "" {
user = "core"
}
err = sshRunCmd(host, user, runIPEcho)
if err != nil {
e2e.Logf("Failed to run %v: %v", runIPEcho, err)
return "", err
}
// Update firewall rules to expose ip-echo service
ruleName := fmt.Sprintf("%s-int-svc-ingress-allow", infraID)
ports, err := getgcloudClient(oc).GetFirewallAllowPorts(ruleName)
if err != nil {
e2e.Logf("Failed to update firewall rules for port %v: %v", ports, err)
return "", err
}
if !strings.Contains(ports, "tcp:"+port) {
addIPEchoPort := fmt.Sprintf("%s,tcp:%s", ports, port)
updateFirewallPortErr := getgcloudClient(oc).UpdateFirewallAllowPorts(ruleName, addIPEchoPort)
if updateFirewallPortErr != nil {
return "", updateFirewallPortErr
}
e2e.Logf("Allow Ports: %s", addIPEchoPort)
}
ipEchoURL := net.JoinHostPort(internalIP, port)
return ipEchoURL, nil
}
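// Illustrative GCP flow (a sketch mirroring the lookup chain used in
// uninstallIPEchoServiceOnGCP below):
//   infraID, _ := exutil.GetInfraID(oc)
//   host, _ := getIntSvcExternalIPFromGcp(oc, infraID)
//   ipEchoURL, err := installIPEchoServiceOnGCP(oc, infraID, host)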
func uninstallIPEchoServiceOnGCP(oc *exutil.CLI) {
infraID, err := exutil.GetInfraID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
host, err := getIntSvcExternalIPFromGcp(oc, infraID)
o.Expect(err).NotTo(o.HaveOccurred())
//Remove ip-echo service
user := os.Getenv("SSH_CLOUD_PRIV_GCP_USER")
if user == "" {
user = "cloud-user"
}
o.Expect(sshRunCmd(host, user, "sudo podman rm ip-echo -f")).NotTo(o.HaveOccurred())
//Update firewall rules
ruleName := fmt.Sprintf("%s-int-svc-ingress-allow", infraID)
ports, err := getgcloudClient(oc).GetFirewallAllowPorts(ruleName)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(ports, "tcp:9095") {
updatedPorts := strings.Replace(ports, ",tcp:9095", "", -1)
o.Expect(getgcloudClient(oc).UpdateFirewallAllowPorts(ruleName, updatedPorts)).NotTo(o.HaveOccurred())
}
}
func getZoneOfInstanceFromGcp(oc *exutil.CLI, infraID string, workerName string) (string, error) {
zone, err := getgcloudClient(oc).GetZone(infraID, workerName)
e2e.Logf("zone for instance %v is: %s", workerName, zone)
return zone, err
}
func startInstanceOnGcp(oc *exutil.CLI, nodeName string, zone string) error {
err := getgcloudClient(oc).StartInstance(nodeName, zone)
return err
}
func stopInstanceOnGcp(oc *exutil.CLI, nodeName string, zone string) error {
err := getgcloudClient(oc).StopInstance(nodeName, zone)
return err
}
// Run timeout ssh connection test from GCP int-svc instance
func accessEgressNodeFromIntSvcInstanceOnGCP(host string, IPaddr string) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_GCP_USER")
if user == "" {
user = "core"
}
cmd := fmt.Sprintf(`timeout 5 bash -c "</dev/tcp/%v/22"`, IPaddr)
err := sshRunCmd(host, user, cmd)
if err != nil {
e2e.Logf("Failed to run %v: %v", cmd, err)
// Extract the return code from the err variable
if returnedErr, ok := err.(*ssh.ExitError); ok {
return fmt.Sprintf("%d", returnedErr.ExitStatus()), err
}
// IO problems, the return code was not sent back
return "", err
}
return "0", nil
}
// start one AWS instance
func startInstanceOnAWS(a *exutil.AwsClient, hostname string) {
instanceID, err := a.GetAwsInstanceIDFromHostname(hostname)
o.Expect(err).NotTo(o.HaveOccurred())
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
state, err := a.GetAwsInstanceState(instanceID)
if err != nil {
e2e.Logf("%v", err)
return false, nil
}
if state == "running" {
e2e.Logf("The instance is running")
return true, nil
}
if state == "stopped" {
err = a.StartInstance(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be started.", state)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance is not in a state from which it can be started."))
}
func stopInstanceOnAWS(a *exutil.AwsClient, hostname string) {
instanceID, err := a.GetAwsInstanceIDFromHostname(hostname)
o.Expect(err).NotTo(o.HaveOccurred())
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
state, err := a.GetAwsInstanceState(instanceID)
if err != nil {
e2e.Logf("%v", err)
return false, nil
}
if state == "stopped" {
e2e.Logf("The instance is already stopped.")
return true, nil
}
if state == "running" {
err = a.StopInstance(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be stopped.", state)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance is not in a state from which it can be stopped."))
}
// Run timeout ssh connection test from AWS int-svc instance
func accessEgressNodeFromIntSvcInstanceOnAWS(a *exutil.AwsClient, oc *exutil.CLI, IPaddr string) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_AWS_USER")
if user == "" {
user = "core"
}
sshkey := os.Getenv("SSH_CLOUD_PRIV_KEY")
if sshkey == "" {
sshkey = "../internal/config/keys/openshift-qe.pem"
}
ips := getAwsIntSvcIPs(a, oc)
publicIP, ok := ips["publicIP"]
if !ok {
return "", fmt.Errorf("no public IP found for Int Svc instance")
}
cmd := fmt.Sprintf(`timeout 5 bash -c "</dev/tcp/%v/22"`, IPaddr)
sshClient := exutil.SshClient{User: user, Host: publicIP, Port: 22, PrivateKey: sshkey}
err := sshClient.Run(cmd)
if err != nil {
e2e.Logf("Failed to run %v: %v", cmd, err)
// Extract the return code from the err variable
if returnedErr, ok := err.(*ssh.ExitError); ok {
return fmt.Sprintf("%d", returnedErr.ExitStatus()), err
}
// IO problems, the return code was not sent back
return "", err
}
return "0", nil
}
func findIP(input string) []string {
numBlock := "(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])"
regexPattern := numBlock + "\\." + numBlock + "\\." + numBlock + "\\." + numBlock
regEx := regexp.MustCompile(regexPattern)
return regEx.FindAllString(input, -1)
}
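// Example (for illustration only): findIP extracts every dotted-quad IPv4
// address from free-form text such as a tcpdump line, e.g.
//   findIP("IP 10.0.1.5.34567 > 52.23.103.11.80")
// returns []string{"10.0.1.5", "52.23.103.11"}; duplicates can then be
// collapsed with unique below.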
func unique(s []string) []string {
inResult := make(map[string]bool)
var result []string
for _, str := range s {
if _, ok := inResult[str]; !ok {
inResult[str] = true
result = append(result, str)
}
}
return result
}
type azureCredentials struct {
AzureClientID string `json:"azure_client_id,omitempty"`
AzureClientSecret string `json:"azure_client_secret,omitempty"`
AzureSubscriptionID string `json:"azure_subscription_id,omitempty"`
AzureTenantID string `json:"azure_tenant_id,omitempty"`
}
// Get Azure credentials from cluster
func getAzureCredentialFromCluster(oc *exutil.CLI) error {
if exutil.CheckPlatform(oc) != "azure" {
g.Skip("it is not azure platform and can not get credential, and then skip it.")
}
credential, getSecErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o=jsonpath={.data}").Output()
if getSecErr != nil {
e2e.Logf("Cannot get credential from secret/azure-credentials with error : %v,", getSecErr)
return getSecErr
}
azureCreds := azureCredentials{}
unmarshalErr := json.Unmarshal([]byte(credential), &azureCreds)
if unmarshalErr != nil {
e2e.Logf("Unmarshal error : %v,", unmarshalErr)
return unmarshalErr
}
azureClientID, decodeACIDErr := base64.StdEncoding.DecodeString(azureCreds.AzureClientID)
if decodeACIDErr != nil {
e2e.Logf("Decode azureClientID error : %v ", decodeACIDErr)
return decodeACIDErr
}
azureClientSecret, decodeACSErr := base64.StdEncoding.DecodeString(azureCreds.AzureClientSecret)
if decodeACSErr != nil {
e2e.Logf("Decode azureClientSecret error: %v", decodeACSErr)
return decodeACSErr
}
azureSubscriptionID, decodeASIDErr := base64.StdEncoding.DecodeString(azureCreds.AzureSubscriptionID)
if decodeASIDErr != nil {
e2e.Logf("Decode azureSubscriptionID error: %v ", decodeASIDErr)
return decodeASIDErr
}
azureTenantID, decodeATIDErr := base64.StdEncoding.DecodeString(azureCreds.AzureTenantID)
if decodeATIDErr != nil {
e2e.Logf("Decode azureTenantID error : %v ", decodeATIDErr)
return decodeATIDErr
}
os.Setenv("AZURE_CLIENT_ID", string(azureClientID))
os.Setenv("AZURE_CLIENT_SECRET", string(azureClientSecret))
os.Setenv("AZURE_SUBSCRIPTION_ID", string(azureSubscriptionID))
os.Setenv("AZURE_TENANT_ID", string(azureTenantID))
e2e.Logf("Azure credentials successfully loaded.")
return nil
}
func getAzureResourceGroup(oc *exutil.CLI) (string, error) {
if exutil.CheckPlatform(oc) != "azure" {
return "", fmt.Errorf("it is not azure platform and can not get resource group")
}
credential, getCredErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o=jsonpath={.data.azure_resourcegroup}").Output()
if getCredErr != nil {
e2e.Logf("Cannot get credential from secret/azure-credentials with error : %v,", getCredErr)
return "", getCredErr
}
azureResourceGroup, rgErr := base64.StdEncoding.DecodeString(credential)
if rgErr != nil {
e2e.Logf("Cannot get resource group, error: %v", rgErr)
return "", rgErr
}
return string(azureResourceGroup), nil
}
func isAzurePrivate(oc *exutil.CLI) bool {
installConfig, err := runOcWithRetry(oc.AsAdmin(), "get", "cm", "cluster-config-v1", "-n", "kube-system", "-o=jsonpath={.data.install-config}")
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), "i/o timeout") {
e2e.Logf("System issues with err=%v\n)", err)
return true
}
e2e.Logf("\nTry to get cm cluster-config-v1, but failed with error: %v \n", err)
return false
}
if strings.Contains(installConfig, "publish: Internal") && strings.Contains(installConfig, "outboundType: Loadbalancer") {
e2e.Logf("This is Azure Private cluster.")
return true
}
return false
}
func isAzureStack(oc *exutil.CLI) bool {
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.ToLower(cloudName) == "azurestackcloud" {
e2e.Logf("This is Azure Stack cluster.")
return true
}
return false
}
func getAzureIntSvcResrouceGroup(oc *exutil.CLI) (string, error) {
azureResourceGroup, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.networkResourceGroupName}").Output()
if err != nil {
e2e.Logf("Cannot get resource group, error: %v", err)
return "", err
}
return azureResourceGroup, nil
}
func getAzureIntSvcVMPrivateIP(oc *exutil.CLI, sess *exutil.AzureSession, rg string) (string, error) {
privateIP := ""
clusterPrefixName := exutil.GetClusterPrefixName(oc)
vmName := clusterPrefixName + "-int-svc"
privateIP, getPrivateIPErr := exutil.GetAzureVMPrivateIP(sess, rg, vmName)
if getPrivateIPErr != nil {
e2e.Logf("Cannot get private IP from int svc vm, error: %v", getPrivateIPErr)
return "", getPrivateIPErr
}
return privateIP, nil
}
func getAzureIntSvcVMPublicIP(oc *exutil.CLI, sess *exutil.AzureSession, rg string) (string, error) {
publicIP := ""
clusterPrefixName := exutil.GetClusterPrefixName(oc)
vmName := clusterPrefixName + "-int-svc"
publicIP, getPublicIPErr := exutil.GetAzureVMPublicIP(sess, rg, vmName)
if getPublicIPErr != nil {
e2e.Logf("Cannot get public IP from int svc vm, error: %v", getPublicIPErr)
return "", getPublicIPErr
}
return publicIP, nil
}
func installIPEchoServiceOnAzure(oc *exutil.CLI, sess *exutil.AzureSession, rg string) (string, error) {
user := "core"
sshkey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
command := "sudo netstat -ntlp | grep 9095 || sudo podman run --name ipecho -d -p 9095:80 quay.io/openshifttest/ip-echo:1.2.0"
e2e.Logf("Run command, %s \n", command)
privateIP, privateIPErr := getAzureIntSvcVMPrivateIP(oc, sess, rg)
if privateIPErr != nil || privateIP == "" {
return "", privateIPErr
}
publicIP, publicIPErr := getAzureIntSvcVMPublicIP(oc, sess, rg)
if publicIPErr != nil || publicIP == "" {
return "", publicIPErr
}
sshClient := exutil.SshClient{User: user, Host: publicIP, Port: 22, PrivateKey: sshkey}
err = sshClient.Run(command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, err)
return "", err
}
ipEchoURL := net.JoinHostPort(privateIP, "9095")
return ipEchoURL, nil
}
// Run timeout ssh connection test from Azure int-svc instance
func accessEgressNodeFromIntSvcInstanceOnAzure(sess *exutil.AzureSession, oc *exutil.CLI, rg string, IPaddr string) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_AZURE_USER")
if user == "" {
user = "core"
}
sshkey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
publicIP, publicIPErr := getAzureIntSvcVMPublicIP(oc, sess, rg)
if publicIPErr != nil || publicIP == "" {
return "", publicIPErr
}
cmd := fmt.Sprintf(`timeout 5 bash -c "</dev/tcp/%v/22"`, IPaddr)
sshClient := exutil.SshClient{User: user, Host: publicIP, Port: 22, PrivateKey: sshkey}
err = sshClient.Run(cmd)
if err != nil {
e2e.Logf("Failed to run %v: %v", cmd, err)
// Extract the return code from the err variable
if returnedErr, ok := err.(*ssh.ExitError); ok {
return fmt.Sprintf("%d", returnedErr.ExitStatus()), err
}
// IO problems, the return code was not sent back
return "", err
}
return "0", nil
}
// runOcWithRetry runs the oc command with up to 5 retries if a timeout error occurred while running the command.
func runOcWithRetry(oc *exutil.CLI, cmd string, args ...string) (string, error) {
var err error
var output string
maxRetries := 5
for numRetries := 0; numRetries < maxRetries; numRetries++ {
if numRetries > 0 {
e2e.Logf("Retrying oc command (retry count=%v/%v)", numRetries+1, maxRetries)
}
output, err = oc.Run(cmd).Args(args...).Output()
// If an error was found, either return the error, or retry if a timeout error was found.
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), "i/o timeout") {
// Retry on "i/o timeout" errors
e2e.Logf("Warning: oc command encountered i/o timeout.\nerr=%v\n)", err)
continue
}
return output, err
}
// Break out of loop if no error.
break
}
return output, err
}
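// Illustrative call (assumed arguments): list node names, retrying on
// transient "i/o timeout" errors.
//   output, err := runOcWithRetry(oc.AsAdmin(), "get", "nodes", "-o", "name")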
func createSnifferDaemonset(oc *exutil.CLI, ns, dsName, nodeLabel, labelKey, dstHost, phyInf string, dstPort int) (tcpDS *tcpdumpDaemonSet, err error) {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
tcpdumpDSTemplate := filepath.Join(buildPruningBaseDir, "tcpdump-daemonset-template.yaml")
_, err = runOcWithRetry(oc.AsAdmin().WithoutNamespace(), "adm", "policy", "add-scc-to-user", "privileged", fmt.Sprintf("system:serviceaccount:%s:default", ns))
o.Expect(err).NotTo(o.HaveOccurred())
tcpdumpDS := tcpdumpDaemonSet{
name: dsName,
template: tcpdumpDSTemplate,
namespace: ns,
nodeLabel: nodeLabel,
labelKey: labelKey,
phyInterface: phyInf,
dstPort: dstPort,
dstHost: dstHost,
}
dsErr := tcpdumpDS.createTcpdumpDS(oc)
if dsErr != nil {
return &tcpdumpDS, dsErr
}
platform := exutil.CheckPlatform(oc)
// Due to slowness associated with OpenStack cluster through PSI, add a little wait time before checking tcpdumpDS for OSP
if platform == "openstack" {
time.Sleep(30 * time.Second)
}
dsReadyErr := waitDaemonSetReady(oc, ns, tcpdumpDS.name)
if dsReadyErr != nil {
return &tcpdumpDS, dsReadyErr
}
return &tcpdumpDS, nil
}
// waitDaemonSetReady by checking if NumberReady == DesiredNumberScheduled.
func waitDaemonSetReady(oc *exutil.CLI, ns, dsName string) error {
desiredNumStr, scheduledErr := runOcWithRetry(oc.AsAdmin(), "get", "ds", dsName, "-n", ns, "-ojsonpath={.status.desiredNumberScheduled}")
if scheduledErr != nil {
return fmt.Errorf("Cannot get DesiredNumberScheduled for daemonset :%s", dsName)
}
desiredNum, convertErr := strconv.Atoi(desiredNumStr)
o.Expect(convertErr).NotTo(o.HaveOccurred())
dsErr := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
readyNumStr, readyErr := runOcWithRetry(oc.AsAdmin(), "get", "ds", dsName, "-n", ns, "-ojsonpath={.status.numberReady}")
o.Expect(readyErr).NotTo(o.HaveOccurred())
readyNum, convertErr := strconv.Atoi(readyNumStr)
o.Expect(convertErr).NotTo(o.HaveOccurred())
if desiredNum != readyNum || readyNum == 0 || desiredNum == 0 {
e2e.Logf("DesiredNumberScheduled for daemonset is %v, ready number is %v, wait for next try.", desiredNum, readyNum)
return false, nil
}
e2e.Logf("DesiredNumberScheduled for daemonset is %v, ready number is %v.", desiredNum, readyNum)
return true, nil
})
if dsErr != nil {
return fmt.Errorf("The daemonset :%s is not ready", dsName)
}
return nil
}
// checkMatchedIPs: when match is true, expectedIP is expected in the logs; when match is false, expectedIP is NOT expected in the logs
func checkMatchedIPs(oc *exutil.CLI, ns, dsName string, searchString, expectedIP string, match bool) error {
e2e.Logf("Expected egressIP hit egress node logs : %v", match)
matchErr := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
foundIPs, searchErr := getSnifferLogs(oc, ns, dsName, searchString)
o.Expect(searchErr).NotTo(o.HaveOccurred())
_, ok := foundIPs[expectedIP]
// Expect there are matched IPs
if match && !ok {
e2e.Logf("Waiting for the logs to be synced, try next round.")
return false, nil
}
//Expect there is no matched IP
if !match && ok {
e2e.Logf("Waiting for the logs to be synced, try next round.")
return false, nil
}
return true, nil
})
e2e.Logf("Checking expected result in tcpdump log got error message as: %v.", matchErr)
return matchErr
}
// getSnifferLogs scans sniffer logs and returns the source IPs for the request.
func getSnifferLogs(oc *exutil.CLI, ns, dsName, searchString string) (map[string]int, error) {
snifferPods := getPodName(oc, ns, "name="+dsName)
var snifLogs string
for _, pod := range snifferPods {
log, err := runOcWithRetry(oc.AsAdmin(), "logs", pod, "-n", ns)
if err != nil {
return nil, err
}
snifLogs += "\n" + log
}
var ip string
snifferLogs := strings.Split(snifLogs, "\n")
matchedIPs := make(map[string]int)
if len(snifferLogs) > 0 {
for _, line := range snifferLogs {
if !strings.Contains(line, searchString) {
continue
}
e2e.Logf("Try to find source ip in this log line:\n %v", line)
matchLineSlice := strings.Fields(line)
ipPortSlice := strings.Split(matchLineSlice[9], ".")
e2e.Logf("%s", matchLineSlice[9])
ip = strings.Join(ipPortSlice[:len(ipPortSlice)-1], ".")
e2e.Logf("Found source ip %s in this log line.", ip)
matchedIPs[ip]++
}
} else {
e2e.Logf("No new log generated!")
}
return matchedIPs, nil
}
func getRequestURL(domainName string) (string, string) {
randomStr := getRandomString()
url := fmt.Sprintf("curl -s http://%s/?request=%s --connect-timeout 5", domainName, randomStr)
return randomStr, url
}
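// Illustrative usage (hypothetical host): the returned random string is the
// search key used later when scanning sniffer logs for this request.
//   randomStr, curlCmd := getRequestURL("www.example.com")
// where curlCmd is "curl -s http://www.example.com/?request=<randomStr> --connect-timeout 5".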
func waitCloudPrivateIPconfigUpdate(oc *exutil.CLI, egressIP string, exist bool) {
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "baremetal") || strings.Contains(platform, "vsphere") || strings.Contains(platform, "nutanix") {
e2e.Logf("Baremetal and Vsphere platform don't have cloudprivateipconfig, no need check cloudprivateipconfig!")
} else {
egressipErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
e2e.Logf("Wait for cloudprivateipconfig updated,expect %s exist: %v.", egressIP, exist)
output, err := runOcWithRetry(oc.AsAdmin(), "get", "cloudprivateipconfig", egressIP)
e2e.Logf("%s", output)
if exist && err == nil && strings.Contains(output, egressIP) {
return true, nil
}
if !exist && err != nil && strings.Contains(output, "NotFound") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(egressipErr, "CloudprivateConfigIP was not updated as expected!")
}
}
// getSnifPhyInf gets the physical interface of a node
func getSnifPhyInf(oc *exutil.CLI, nodeName string) (string, error) {
var phyInf string
ifaceErr2 := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 15*time.Second, false, func(cxt context.Context) (bool, error) {
ifaceList2, ifaceErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
if ifaceErr != nil {
e2e.Logf("Debug node Error: %v", ifaceErr)
return false, nil
}
e2e.Logf("%s", ifaceList2)
infList := strings.Split(ifaceList2, "\n")
for _, inf := range infList {
if strings.Contains(inf, "ovs-if-phys0") {
phyInf = strings.Fields(inf)[3]
}
}
return true, nil
})
return phyInf, ifaceErr2
}
// nslookDomainName gets the first IPv4 address of a domain
func nslookDomainName(domainName string) string {
ips, err := net.LookupIP(domainName)
o.Expect(err).NotTo(o.HaveOccurred())
for _, ip := range ips {
if ip.To4() != nil {
return ip.String()
}
}
e2e.Logf("There is no IPv4 address for destination domain %s", domainName)
return ""
}
// verifyEgressIPinTCPDump verifies that the EgressIP takes effect.
func verifyEgressIPinTCPDump(oc *exutil.CLI, pod, podNS, expectedEgressIP, dstHost, tcpdumpNS, tcpdumpName string, expectedOrNot bool) error {
egressipErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := e2eoutput.RunHostCmd(podNS, pod, url)
if checkMatchedIPs(oc, tcpdumpNS, tcpdumpName, randomStr, expectedEgressIP, expectedOrNot) != nil || err != nil {
e2e.Logf("Expected to find egressIP in tcpdump is: %v, did not get expected result in tcpdump log, try next round.", expectedOrNot)
return false, nil
}
return true, nil
})
return egressipErr
}
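// Illustrative end-to-end flow (hypothetical names; a sketch rather than the
// original test code): sniff on the egress nodes and assert the egress IP
// shows up as the source address.
//   phyInf, _ := getSnifPhyInf(oc, egressNodeName)
//   ds, dsErr := createSnifferDaemonset(oc, ns, "tcpdump-eip", "node-role.kubernetes.io/worker", "kubernetes.io/hostname", dstHost, phyInf, 80)
//   defer deleteTcpdumpDS(oc, ds.name, ns)
//   chkErr := verifyEgressIPinTCPDump(oc, testPodName, testPodNS, egressIP, dstHost, ns, ds.name, true)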
type instance struct {
nodeName string
oc *exutil.CLI
}
func (i *instance) GetName() string {
return i.nodeName
}
type ospInstance struct {
instance
ospObj exutil.Osp
}
// OspCredentials gets credentials for the OSP platform and exports them as environment variables
func OspCredentials(oc *exutil.CLI) {
credentials, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/openstack-credentials", "-n", "kube-system", "-o", `jsonpath={.data.clouds\.yaml}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
credential, err := base64.StdEncoding.DecodeString(credentials)
o.Expect(err).NotTo(o.HaveOccurred())
var (
username string
password string
projectID string
authURL string
userDomainName string
regionName string
projectName string
)
credVars := []string{"auth_url", "username", "password", "project_id", "user_domain_name", "region_name", "project_name"}
for _, s := range credVars {
r, _ := regexp.Compile(`` + s + `:.*`)
match := r.FindAllString(string(credential), -1)
if strings.Contains(s, "username") {
username = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_USERNAME", username)
}
if strings.Contains(s, "password") {
password = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_PASSWORD", password)
}
if strings.Contains(s, "auth_url") {
authURL = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_AUTH_URL", authURL)
}
if strings.Contains(s, "project_id") {
projectID = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_PROJECT_ID", projectID)
}
if strings.Contains(s, "user_domain_name") {
userDomainName = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_USER_DOMAIN_NAME", userDomainName)
}
if strings.Contains(s, "region_name") {
regionName = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_REGION_NAME", regionName)
}
if strings.Contains(s, "project_name") {
projectName = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_PROJECT_NAME", projectName)
}
}
}
// VsphereCloudClient passes env details to the login function and is used to log in to vSphere
func VsphereCloudClient(oc *exutil.CLI) (*exutil.Vmware, *govmomi.Client) {
credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/vsphere-creds", "-n", "kube-system", "-o", `jsonpath={.data}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
output := gjson.Parse(credential).Value().(map[string]interface{})
var accessKeyIDBase64 string
var secureKeyBase64 string
for key, value := range output {
if strings.Contains(key, "username") {
accessKeyIDBase64 = fmt.Sprint(value)
} else if strings.Contains(key, "password") {
secureKeyBase64 = fmt.Sprint(value)
}
}
accessKeyID, err1 := base64.StdEncoding.DecodeString(accessKeyIDBase64)
o.Expect(err1).NotTo(o.HaveOccurred())
secureKey, err2 := base64.StdEncoding.DecodeString(secureKeyBase64)
o.Expect(err2).NotTo(o.HaveOccurred())
cloudConfig, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/cloud-provider-config", "-n", "openshift-config", "-o", `jsonpath={.data.config}`).OutputToFile("vsphere.ini")
o.Expect(err3).NotTo(o.HaveOccurred())
cmd := fmt.Sprintf(`grep -i server "%v" | awk -F '"' '{print $2}'`, cloudConfig)
serverURL, err4 := exec.Command("bash", "-c", cmd).Output()
e2e.Logf("\n serverURL: %s \n", string(serverURL))
o.Expect(err4).NotTo(o.HaveOccurred())
envUsername := string(accessKeyID)
envPassword := string(secureKey)
envURL := string(serverURL)
envURL = strings.TrimSuffix(envURL, "\n")
encodedPassword := url.QueryEscape(envPassword)
govmomiURL := fmt.Sprintf("https://%s:%s@%s/sdk", envUsername, encodedPassword, envURL)
vmware := exutil.Vmware{GovmomiURL: govmomiURL}
return vmware.Login()
}
// startVMOnAzure starts one Azure VM
func startVMOnAzure(az *exutil.AzureSession, nodeName, rg string) {
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
vmState, stateErr := exutil.GetAzureVMInstanceState(az, nodeName, rg)
if stateErr != nil {
e2e.Logf("%v", stateErr)
return false, nil
}
if strings.EqualFold(vmState, "poweredOn") || strings.EqualFold(vmState, "running") || strings.EqualFold(vmState, "active") || strings.EqualFold(vmState, "ready") {
e2e.Logf("The instance has been started with state:%s !", vmState)
return true, nil
}
if strings.EqualFold(vmState, "poweredOff") || strings.EqualFold(vmState, "stopped") || strings.EqualFold(vmState, "paused") || strings.EqualFold(vmState, "notready") {
e2e.Logf("Start instance %s\n", nodeName)
_, err := exutil.StartAzureVM(az, nodeName, rg)
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be started.", vmState)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance %s is not in a state from which it can be started.", nodeName))
}
// stopVMOnAzure stops one Azure VM
func stopVMOnAzure(az *exutil.AzureSession, nodeName, rg string) {
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
vmState, stateErr := exutil.GetAzureVMInstanceState(az, nodeName, rg)
if stateErr != nil {
e2e.Logf("%v", stateErr)
return false, nil
}
if strings.EqualFold(vmState, "poweredoff") || strings.EqualFold(vmState, "stopped") || strings.EqualFold(vmState, "stopping") || strings.EqualFold(vmState, "paused") || strings.EqualFold(vmState, "pausing") || strings.EqualFold(vmState, "deallocated") || strings.EqualFold(vmState, "notready") {
e2e.Logf("The instance %s has been stopped already, and now is with state:%s !", nodeName, vmState)
return true, nil
}
if strings.EqualFold(vmState, "poweredOn") || strings.EqualFold(vmState, "running") || strings.EqualFold(vmState, "active") || strings.EqualFold(vmState, "ready") {
e2e.Logf("Stop instance %s\n", nodeName)
_, err := exutil.StopAzureVM(az, nodeName, rg)
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be stopped.", vmState)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance %s is not in a state from which it can be stopped.", nodeName))
}
func verifyEgressIPWithIPEcho(oc *exutil.CLI, podNS, podName, ipEchoURL string, hit bool, expectedIPs ...string) {
timeout := estimateTimeoutForEgressIP(oc)
if hit {
egressErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
sourceIP, err := e2eoutput.RunHostCmd(podNS, podName, "curl -s "+ipEchoURL+" --connect-timeout 5")
if err != nil {
e2e.Logf("error,%v", err)
return false, nil
}
if !contains(expectedIPs, sourceIP) {
e2e.Logf("Not expected IP,soure IP is %s", sourceIP)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressErr, fmt.Sprintf("sourceIP was not included in %v", expectedIPs))
} else {
egressErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
sourceIP, err := e2eoutput.RunHostCmd(podNS, podName, "curl -s "+ipEchoURL+" --connect-timeout 5")
if err != nil {
e2e.Logf("error,%v", err)
return false, nil
}
if contains(expectedIPs, sourceIP) {
e2e.Logf("Not expected IP,soure IP is %s", sourceIP)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressErr, fmt.Sprintf("sourceIP was still included in %v", expectedIPs))
}
}
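// Illustrative AWS flow (hypothetical variables): stand up the ip-echo
// service on the int-svc instance, then assert the pod egresses with one of
// the reserved IPs.
//   ipEchoURL, echoErr := installIPEchoServiceOnAWS(awsClient, oc)
//   o.Expect(echoErr).NotTo(o.HaveOccurred())
//   verifyEgressIPWithIPEcho(oc, podNS, podName, ipEchoURL, true, freeIPs...)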
func verifyExpectedEIPNumInEIPObject(oc *exutil.CLI, egressIPObject string, expectedNumber int) {
timeout := estimateTimeoutForEgressIP(oc)
egressErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressIPObject)
if len(egressIPMaps1) != expectedNumber {
e2e.Logf("Current EgressIP object length is %v,but expected is %v \n", len(egressIPMaps1), expectedNumber)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressErr, fmt.Sprintf("Failed to get expected number egressIPs %v", expectedNumber))
}
func estimateTimeoutForEgressIP(oc *exutil.CLI) time.Duration {
// https://bugzilla.redhat.com/show_bug.cgi?id=2105801#c8
// https://issues.redhat.com/browse/OCPBUGS-684
// Due to the above two bugs, Azure and OpenStack are much slower for an egressIP to take effect after configuration.
timeout := 100 * time.Second
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "azure") || strings.Contains(platform, "openstack") {
timeout = 210 * time.Second
}
return timeout
}
// GetBmhNodeMachineConfig gets the BareMetalHost name for a BM node from its providerID
func GetBmhNodeMachineConfig(oc *exutil.CLI, nodeName string) (string, error) {
providerIDOutput, bmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o", `jsonpath='{.spec.providerID}'`).Output()
o.Expect(bmhErr).NotTo(o.HaveOccurred())
bmh := strings.Split(providerIDOutput, "/")[4]
e2e.Logf("\n The baremetal host for the node is: %v\n", bmh)
return bmh, bmhErr
}
// stopVMOnIPIBM stops one IPI BM VM
func stopVMOnIPIBM(oc *exutil.CLI, nodeName string) error {
stopErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
vmInstance, err := GetBmhNodeMachineConfig(oc, nodeName)
if err != nil {
return false, nil
}
e2e.Logf("\n\n\n vmInstance for the node is: %v \n\n\n", vmInstance)
patch := `[{"op": "replace", "path": "/spec/online", "value": false}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", "openshift-machine-api", vmInstance, "--type=json", "-p", patch).Execute()
if patchErr != nil {
return false, nil
}
return true, nil
})
e2e.Logf("Not able to stop %s, got error: %v.", nodeName, stopErr)
return stopErr
}
// startVMOnIPIBM starts one IPI BM VM
func startVMOnIPIBM(oc *exutil.CLI, nodeName string) error {
startErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
vmInstance, err := GetBmhNodeMachineConfig(oc, nodeName)
if err != nil {
return false, nil
}
e2e.Logf("\n\n\n vmInstance for the node is: %v \n\n\n", vmInstance)
patch := `[{"op": "replace", "path": "/spec/online", "value": true}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", "openshift-machine-api", vmInstance, "--type=json", "-p", patch).Execute()
if patchErr != nil {
return false, nil
}
return true, nil
})
e2e.Logf("Not able to start %s, got error: %v.", nodeName, startErr)
return startErr
}
func specialPlatformCheck(oc *exutil.CLI) bool {
platform := exutil.CheckPlatform(oc)
specialPlatform := false
e2e.Logf("Check credential in kube-system to see if this cluster is a special STS cluster.")
switch platform {
case "aws":
credErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secrets", "-n", "kube-system", "aws-creds").Execute()
if credErr != nil {
specialPlatform = true
}
case "gcp":
credErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secrets", "-n", "kube-system", "gcp-credentials").Execute()
if credErr != nil {
specialPlatform = true
}
case "azure":
credErr := getAzureCredentialFromCluster(oc)
if credErr != nil {
specialPlatform = true
}
default:
e2e.Logf("Skip this check for other platforms that do not have special STS scenario.")
}
return specialPlatform
}
// Get cluster proxy IP
func getProxyIP(oc *exutil.CLI) string {
httpProxy, err := runOcWithRetry(oc.AsAdmin(), "get", "proxy", "cluster", "-o=jsonpath={.status.httpProxy}")
o.Expect(err).NotTo(o.HaveOccurred())
re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`)
proxyIPs := re.FindAllString(httpProxy, -1)
if len(proxyIPs) == 0 {
return ""
}
return proxyIPs[0]
}
// getIPechoURLFromUPIPrivateVlanBM is used for the template upi-on-baremetal/versioned-installer-packet-http_proxy-private-vlan, where IP echo is deployed as part of the template
func getIPechoURLFromUPIPrivateVlanBM(oc *exutil.CLI) string {
if checkProxy(oc) {
proxyIP := getProxyIP(oc)
if proxyIP == "" {
return ""
}
ipEchoURL := net.JoinHostPort(proxyIP, "9095")
workNode, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
_, curlErr := exutil.DebugNode(oc, workNode, "curl", "-s", ipEchoURL, "--connect-timeout", "5")
if curlErr == nil {
return ipEchoURL
}
}
return ""
}
func getClusterNetworkInfo(oc *exutil.CLI) (string, string) {
clusterNetworkInfoString, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network", "cluster", "-o=jsonpath={.spec.clusterNetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// match out network CIDR and hostPrefix
pattern := regexp.MustCompile(`\d+\.\d+\.\d+\.\d+\/\d+|\d+`)
clusterNetworkInfo := pattern.FindAllString(clusterNetworkInfoString, 2)
networkCIDR := clusterNetworkInfo[0]
hostPrefix := clusterNetworkInfo[1]
e2e.Logf("network CIDR: %v; hostPrefix: %v", networkCIDR, hostPrefix)
return networkCIDR, hostPrefix
}
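// Example (hedged): with spec.clusterNetwork set to
// [{"cidr":"10.128.0.0/14","hostPrefix":23}], getClusterNetworkInfo returns
// ("10.128.0.0/14", "23").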
// start one instance on Nutanix
func startInstanceOnNutanix(nutanix *exutil.NutanixClient, hostname string) {
instanceID, err := nutanix.GetNutanixVMUUID(hostname)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The instance %s UUID is :%s", hostname, instanceID)
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
state, err := nutanix.GetNutanixVMState(instanceID)
if err != nil {
e2e.Logf("Failed to get instance state %s, Error: %v", hostname, err)
return false, nil
}
if state == "ON" {
e2e.Logf("The instance %s is already running", hostname)
return true, nil
}
if state == "OFF" {
err = nutanix.ChangeNutanixVMState(instanceID, "ON")
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be started.", state)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance is not in a state from which it can be started."))
}
// stop one instance on Nutanix
func stopInstanceOnNutanix(nutanix *exutil.NutanixClient, hostname string) {
instanceID, err := nutanix.GetNutanixVMUUID(hostname)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The instance %s UUID is :%s", hostname, instanceID)
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
state, err := nutanix.GetNutanixVMState(instanceID)
if err != nil {
e2e.Logf("Failed to get instance state %s, Error: %v", hostname, err)
return false, nil
}
if state == "OFF" {
e2e.Logf("The instance is already stopped.")
return true, nil
}
if state == "ON" {
err = nutanix.ChangeNutanixVMState(instanceID, "OFF")
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be stopped.", state)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance is not in a state from which it can be stopped."))
}
func checkDisconnect(oc *exutil.CLI) bool {
workNode, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
curlCMD := "curl -I ifconfig.me --connect-timeout 5"
output, err := exutil.DebugNode(oc, workNode, "bash", "-c", curlCMD)
if !strings.Contains(output, "HTTP") || err != nil {
e2e.Logf("Unable to access the public Internet from the cluster.")
return true
}
e2e.Logf("Successfully connected to the public Internet from the cluster.")
return false
}
// newIBMPowerInstance gets the IBM PowerVS instance for an OCP node
func newIBMPowerInstance(oc *exutil.CLI, clientPowerVs *exutil.IBMPowerVsSession, ibmRegion, ibmVpcName, nodeName string) *ibmPowerVsInstance {
return &ibmPowerVsInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
clientPowerVs: clientPowerVs,
ibmRegion: ibmRegion,
ibmVpcName: ibmVpcName,
}
}
// Start starts the IBM PowerVS instance
func (ibmPws *ibmPowerVsInstance) Start() error {
instanceID, status, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
e2e.Logf("\n The ibmPowervs instance %s is currently in state: %s \n", ibmPws.nodeName, status)
if status == "active" {
e2e.Logf("The node is already in active state, no need to start it again\n")
return nil
}
return exutil.PerformInstanceActionOnPowerVs(ibmPws.clientPowerVs, instanceID, "start")
}
// Stop stops the IBM PowerVS instance
func (ibmPws *ibmPowerVsInstance) Stop() error {
instanceID, status, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
e2e.Logf("\n The ibmPowervs instance %s is currently in state: %s \n", ibmPws.nodeName, status)
if status == "shutoff" {
e2e.Logf("The node is already in shutoff state, no need to stop it again\n")
return nil
}
return exutil.PerformInstanceActionOnPowerVs(ibmPws.clientPowerVs, instanceID, "stop")
}
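// Illustrative usage (a sketch with hypothetical session values):
//   pvs := newIBMPowerInstance(oc, powerVsClient, region, vpcName, nodeName)
//   o.Expect(pvs.Stop()).NotTo(o.HaveOccurred())
//   o.Expect(pvs.Start()).NotTo(o.HaveOccurred())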
func cfgRouteOnExternalHost(oc *exutil.CLI, host string, user string, pod string, ns string, externalIntf string) bool {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, pod, "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeIp := getNodeIPv4(oc, ns, nodeName)
podIP := getPodIPv4(oc, ns, pod)
routeCmd := "ip route add " + podIP + " via " + nodeIp + " dev " + externalIntf
err = sshRunCmd(host, user, routeCmd)
if err != nil {
e2e.Logf("send command %v fail with error info %v", routeCmd, err)
return false
} else {
return true
}
}
func rmRouteOnExternalHost(oc *exutil.CLI, host string, user string, pod string, ns string) {
var chkRes bool
podIP := getPodIPv4(oc, ns, pod)
routeCmd := "ip route delete " + podIP + " && " + "ip route"
ipRoute := podIP + "/32"
outPut, err := sshRunCmdOutPut(host, user, routeCmd)
if err != nil || strings.Contains(outPut, ipRoute) {
e2e.Logf("send command %v fail with error info %v", routeCmd, err)
chkRes = false
} else {
e2e.Logf("successfully removed the ip route %v, %v", podIP, outPut)
chkRes = true
}
o.Expect(chkRes).To(o.BeTrue())
}
func sshRunCmdOutPut(host string, user string, cmd string) (string, error) {
privateKey := os.Getenv("SSH_CLOUD_PRIV_KEY")
if privateKey == "" {
privateKey = "../internal/config/keys/openshift-qe.pem"
}
sshClient := exutil.SshClient{User: user, Host: host, Port: 22, PrivateKey: privateKey}
return sshClient.RunOutput(cmd)
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
54d762e0-1bf8-452b-8519-f81f71513aa1
|
createTcpdumpDS
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['tcpdumpDaemonSet']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func (ds *tcpdumpDaemonSet) createTcpdumpDS(oc *exutil.CLI) error {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", ds.template, "-p", "NAME="+ds.name, "NAMESPACE="+ds.namespace, "NODELABEL="+ds.nodeLabel, "LABELKEY="+ds.labelKey, "INF="+ds.phyInterface, "DSTPORT="+strconv.Itoa(ds.dstPort), "HOST="+ds.dstHost)
if err1 != nil {
e2e.Logf("Tcpdump daemonset created failed :%v, and try next round", err1)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("fail to create Tcpdump daemonset %v", ds.name)
}
return nil
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
47610ed4-dcb0-4158-a323-c9228e4e545e
|
deleteTcpdumpDS
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func deleteTcpdumpDS(oc *exutil.CLI, dsName, dsNS string) {
_, err := runOcWithRetry(oc.AsAdmin(), "delete", "ds", dsName, "-n", dsNS, "--ignore-not-found=true")
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
23406f17-ef37-4772-b4f2-79c1b0034179
|
getAwsCredentialFromCluster
|
['"encoding/base64"', '"encoding/json"', '"os"', '"github.com/tidwall/gjson"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getAwsCredentialFromCluster(oc *exutil.CLI) error {
if exutil.CheckPlatform(oc) != "aws" {
g.Skip("it is not aws platform and can not get credential, and then skip it.")
}
credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n", "kube-system", "-o", "json").Output()
// Skip for sts and c2s clusters.
if err != nil {
e2e.Logf("Cannot get AWS basic auth credential,%v", err)
return err
}
accessKeyIDBase64, secureKeyBase64 := gjson.Get(credential, `data.aws_access_key_id`).String(), gjson.Get(credential, `data.aws_secret_access_key`).String()
accessKeyID, err1 := base64.StdEncoding.DecodeString(accessKeyIDBase64)
o.Expect(err1).NotTo(o.HaveOccurred())
secureKey, err2 := base64.StdEncoding.DecodeString(secureKeyBase64)
o.Expect(err2).NotTo(o.HaveOccurred())
clusterRegion, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err3).NotTo(o.HaveOccurred())
os.Setenv("AWS_ACCESS_KEY_ID", string(accessKeyID))
os.Setenv("AWS_SECRET_ACCESS_KEY", string(secureKey))
os.Setenv("AWS_REGION", clusterRegion)
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
a09b081f-f5b5-49a4-b1c7-42aa73d1c51c
|
getAwsIntSvcInstanceID
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getAwsIntSvcInstanceID(a *exutil.AwsClient, oc *exutil.CLI) (string, error) {
clusterPrefixName := exutil.GetClusterPrefixName(oc)
instanceName := clusterPrefixName + "-int-svc"
instanceID, err := a.GetAwsInstanceID(instanceName)
if err != nil {
e2e.Logf("Get bastion instance id failed with error %v .", err)
return "", err
}
return instanceID, nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
9cdba676-cbed-48af-8eca-60e015211880
|
getAwsIntSvcIPs
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getAwsIntSvcIPs(a *exutil.AwsClient, oc *exutil.CLI) map[string]string {
instanceID, err := getAwsIntSvcInstanceID(a, oc)
o.Expect(err).NotTo(o.HaveOccurred())
ips, err := a.GetAwsIntIPs(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
return ips
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
62084ecf-3d85-490c-b77d-cb5570893762
|
updateAwsIntSvcSecurityRule
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func updateAwsIntSvcSecurityRule(a *exutil.AwsClient, oc *exutil.CLI, dstPort int64) {
instanceID, err := getAwsIntSvcInstanceID(a, oc)
o.Expect(err).NotTo(o.HaveOccurred())
err = a.UpdateAwsIntSecurityRule(instanceID, dstPort)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
1a79ed56-e7f4-4442-85dd-99e8d34af5d9
|
installIPEchoServiceOnAWS
|
['"fmt"', '"os"', '"net"']
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func installIPEchoServiceOnAWS(a *exutil.AwsClient, oc *exutil.CLI) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_AWS_USER")
if user == "" {
user = "core"
}
sshkey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
command := "sudo netstat -ntlp | grep 9095 || sudo podman run --name ipecho -d -p 9095:80 quay.io/openshifttest/ip-echo:1.2.0"
e2e.Logf("Run command", command)
ips := getAwsIntSvcIPs(a, oc)
publicIP, ok := ips["publicIP"]
if !ok {
return "", fmt.Errorf("no public IP found for Int Svc instance")
}
privateIP, ok := ips["privateIP"]
if !ok {
return "", fmt.Errorf("no private IP found for Int Svc instance")
}
sshClient := exutil.SshClient{User: user, Host: publicIP, Port: 22, PrivateKey: sshkey}
err = sshClient.Run(command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, err)
return "", err
}
updateAwsIntSvcSecurityRule(a, oc, 9095)
ipEchoURL := net.JoinHostPort(privateIP, "9095")
return ipEchoURL, nil
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
bea9f4a8-9fdf-464b-baa7-aa39959b5405
|
getIfaddrFromNode
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getIfaddrFromNode(nodeName string, oc *exutil.CLI) string {
egressIpconfig, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.cloud\\.network\\.openshift\\.io/egress-ipconfig}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The egressipconfig is %v", egressIpconfig)
if len(egressIpconfig) == 0 {
e2e.Logf("The node %s doesn't have egressIP annotation", nodeName)
return ""
}
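// The annotation value is JSON like
// [{"interface":"...","ifaddr":{"ipv4":"10.0.2.0/23"},...}] (representative
// example), so splitting on '"' leaves the ipv4 CIDR at index 9.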
ifaddr := strings.Split(egressIpconfig, "\"")[9]
e2e.Logf("The subnet of node %s is %v .", nodeName, ifaddr)
return ifaddr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
dbebdeb4-336a-4b7d-a9e2-7c9e6fb771f3
|
getPrimaryIfaddrFromBMNode
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getPrimaryIfaddrFromBMNode(oc *exutil.CLI, nodeName string) (string, string) {
primaryIfaddr, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.annotations.k8s\\.ovn\\.org/node-primary-ifaddr}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The primaryIfaddr is %v for node %s", primaryIfaddr, nodeName)
var ipv4Ifaddr, ipv6Ifaddr string
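// The annotation value is JSON like {"ipv4":"10.0.0.2/24","ipv6":"fd00::5/64"}
// (representative example), so splitting on '"' leaves the ipv4 value at
// index 3 and, on dual-stack, the ipv6 value at index 7.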
tempSlice := strings.Split(primaryIfaddr, "\"")
ipStackType := checkIPStackType(oc)
switch ipStackType {
case "ipv4single":
o.Expect(len(tempSlice) > 3).Should(o.BeTrue())
ipv4Ifaddr = tempSlice[3]
e2e.Logf("The ipv4 subnet of node %s is %v .", nodeName, ipv4Ifaddr)
case "dualstack":
o.Expect(len(tempSlice) > 7).Should(o.BeTrue())
ipv4Ifaddr = tempSlice[3]
ipv6Ifaddr = tempSlice[7]
e2e.Logf("The ipv4 subnet of node %s is %v, ipv6 subnet is :%v", nodeName, ipv4Ifaddr, ipv6Ifaddr)
case "ipv6single":
o.Expect(len(tempSlice) > 3).Should(o.BeTrue())
ipv6Ifaddr = tempSlice[3]
e2e.Logf("The ipv6 subnet of node %s is %v .", nodeName, ipv6Ifaddr)
default:
e2e.Logf("Get ipStackType as %s", ipStackType)
g.Skip("Skip for not supported IP stack type!! ")
}
return ipv4Ifaddr, ipv6Ifaddr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d20333db-5983-4830-a47b-53db4a149a8d
|
findUnUsedIPsOnNode
|
['"math/rand"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func findUnUsedIPsOnNode(oc *exutil.CLI, nodeName, cidr string, number int) []string {
ipRange, _ := Hosts(cidr)
var ipUnused = []string{}
//shuffle the ips slice
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(ipRange), func(i, j int) { ipRange[i], ipRange[j] = ipRange[j], ipRange[i] })
var err error
var podName string
var ns string
podName, err = exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
ns = "openshift-ovn-kubernetes"
for _, ip := range ipRange {
if len(ipUnused) < number {
pingCmd := "ping -c4 -t1 " + ip
msg, err := exutil.RemoteShPodWithBash(oc, ns, podName, pingCmd)
if err != nil && (strings.Contains(msg, "Destination Host Unreachable") || strings.Contains(msg, "100% packet loss")) {
e2e.Logf("%s is not used!\n", ip)
ipUnused = append(ipUnused, ip)
} else if err != nil {
break
}
} else {
break
}
}
return ipUnused
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
ceb1ba5c-126c-4726-ba48-c096c8e47c78
|
findFreeIPs
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func findFreeIPs(oc *exutil.CLI, nodeName string, number int) []string {
var freeIPs []string
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "vsphere") {
sub1, err := getDefaultSubnet(oc)
o.Expect(err).NotTo(o.HaveOccurred())
freeIPs = findUnUsedIPs(oc, sub1, number)
} else if strings.Contains(platform, "baremetal") || strings.Contains(platform, "none") || strings.Contains(platform, "nutanix") || strings.Contains(platform, "kubevirt") || strings.Contains(platform, "powervs") {
ipv4Sub, _ := getPrimaryIfaddrFromBMNode(oc, nodeName)
tempSlice := strings.Split(ipv4Sub, "/")
o.Expect(len(tempSlice) > 1).Should(o.BeTrue())
preFix, err := strconv.Atoi(tempSlice[1])
o.Expect(err).NotTo(o.HaveOccurred())
if preFix > 29 {
g.Skip("There might be no enough free IPs in current subnet, skip the test!!")
}
freeIPs = findUnUsedIPsOnNode(oc, nodeName, ipv4Sub, number)
} else {
sub1 := getIfaddrFromNode(nodeName, oc)
if len(sub1) == 0 && strings.Contains(platform, "gcp") {
g.Skip("Skip the tests as no egressIP annoatation on this platform nodes!!")
}
o.Expect(len(sub1) == 0).NotTo(o.BeTrue())
freeIPs = findUnUsedIPsOnNode(oc, nodeName, sub1, number)
}
return freeIPs
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
164d3a7d-c359-43b3-a51a-bdc5588037b9
|
findFreeIPsForCIDRs
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func findFreeIPsForCIDRs(oc *exutil.CLI, nodeName, cidr string, number int) []string {
var freeIPs []string
freeIPs = findUnUsedIPsOnNode(oc, nodeName, cidr, number)
o.Expect(len(freeIPs)).Should(o.Equal(number))
return freeIPs
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
7c798f88-d9bc-4a3d-b537-2bb4f09851de
|
findFreeIPv6s
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func findFreeIPv6s(oc *exutil.CLI, nodeName string, number int) []string {
var freeIPs []string
_, ipv6Sub := getPrimaryIfaddrFromBMNode(oc, nodeName)
tempSlice := strings.Split(ipv6Sub, "/")
o.Expect(len(tempSlice) > 1).Should(o.BeTrue())
preFix, err := strconv.Atoi(tempSlice[1])
o.Expect(err).NotTo(o.HaveOccurred())
if preFix > 126 {
g.Skip("There might be no enough free IPs in current subnet, skip the test!!")
}
freeIPs, err = findUnUsedIPv6(oc, ipv6Sub, number)
o.Expect(err).NotTo(o.HaveOccurred())
return freeIPs
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
ed42de72-44eb-4ecb-b185-9a30f6d626fd
|
execCommandInOVNPodOnNode
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func execCommandInOVNPodOnNode(oc *exutil.CLI, nodeName, command string) (string, error) {
ovnPodName, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
msg, err := exutil.RemoteShPodWithBash(oc, "openshift-ovn-kubernetes", ovnPodName, command)
if err != nil {
e2e.Logf("Execute ovn command failed with err:%v .", err)
return msg, err
}
return msg, nil
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
bf440fc2-1e5b-442b-950e-479d992053c6
|
execCommandInSDNPodOnNode
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func execCommandInSDNPodOnNode(oc *exutil.CLI, nodeName, command string) (string, error) {
sdnPodName, err := exutil.GetPodName(oc, "openshift-sdn", "app=sdn", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
msg, err := exutil.RemoteShPodWithBash(oc, "openshift-sdn", sdnPodName, command)
if err != nil {
e2e.Logf("Execute sdn command failed with err:%v .", err)
return msg, err
}
return msg, nil
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
1ec283ea-5217-456b-b161-9a47057f362d
|
getgcloudClient
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getgcloudClient(oc *exutil.CLI) *exutil.Gcloud {
if exutil.CheckPlatform(oc) != "gcp" {
g.Skip("it is not gcp platform!")
}
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if projectID != "openshift-qe" {
g.Skip("openshift-qe project is needed to execute this test case!")
}
gcloud := exutil.Gcloud{ProjectID: projectID}
return gcloud.Login()
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
dfb6b4c8-e0d5-49e5-a24a-e9cc44c02f4b
|
getIntSvcExternalIPFromGcp
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getIntSvcExternalIPFromGcp(oc *exutil.CLI, infraID string) (string, error) {
externalIP, err := getgcloudClient(oc).GetIntSvcExternalIP(infraID)
e2e.Logf("Additional VM external ip: %s", externalIP)
return externalIP, err
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
ecfc98a4-cb03-46f5-826d-05f0eb8eb322
|
installIPEchoServiceOnGCP
|
['"fmt"', '"os"', '"strings"', '"net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func installIPEchoServiceOnGCP(oc *exutil.CLI, infraID string, host string) (string, error) {
e2e.Logf("Infra id: %s, install ipecho service on host %s", infraID, host)
// Run ip-echo service on the additional VM
serviceName := "ip-echo"
internalIP, err := getgcloudClient(oc).GetIntSvcInternalIP(infraID)
o.Expect(err).NotTo(o.HaveOccurred())
port := "9095"
runIPEcho := fmt.Sprintf("sudo netstat -ntlp | grep %s || sudo podman run --name %s -d -p %s:80 quay.io/openshifttest/ip-echo:1.2.0", port, serviceName, port)
user := os.Getenv("SSH_CLOUD_PRIV_GCP_USER")
if user == "" {
user = "core"
}
err = sshRunCmd(host, user, runIPEcho)
if err != nil {
e2e.Logf("Failed to run %v: %v", runIPEcho, err)
return "", err
}
// Update firewall rules to expose ip-echo service
ruleName := fmt.Sprintf("%s-int-svc-ingress-allow", infraID)
ports, err := getgcloudClient(oc).GetFirewallAllowPorts(ruleName)
if err != nil {
e2e.Logf("Failed to update firewall rules for port %v: %v", ports, err)
return "", err
}
if !strings.Contains(ports, "tcp:"+port) {
addIPEchoPort := fmt.Sprintf("%s,tcp:%s", ports, port)
updateFirewallPortErr := getgcloudClient(oc).UpdateFirewallAllowPorts(ruleName, addIPEchoPort)
if updateFirewallPortErr != nil {
return "", updateFirewallPortErr
}
e2e.Logf("Allow Ports: %s", addIPEchoPort)
}
ipEchoURL := net.JoinHostPort(internalIP, port)
return ipEchoURL, nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
b106b60b-4326-4f87-bc1e-3f4bfdd94e7f
|
uninstallIPEchoServiceOnGCP
|
['"fmt"', '"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func uninstallIPEchoServiceOnGCP(oc *exutil.CLI) {
infraID, err := exutil.GetInfraID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
host, err := getIntSvcExternalIPFromGcp(oc, infraID)
o.Expect(err).NotTo(o.HaveOccurred())
//Remove ip-echo service
user := os.Getenv("SSH_CLOUD_PRIV_GCP_USER")
if user == "" {
user = "cloud-user"
}
o.Expect(sshRunCmd(host, user, "sudo podman rm ip-echo -f")).NotTo(o.HaveOccurred())
//Update firewall rules
ruleName := fmt.Sprintf("%s-int-svc-ingress-allow", infraID)
ports, err := getgcloudClient(oc).GetFirewallAllowPorts(ruleName)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(ports, "tcp:9095") {
updatedPorts := strings.Replace(ports, ",tcp:9095", "", -1)
o.Expect(getgcloudClient(oc).UpdateFirewallAllowPorts(ruleName, updatedPorts)).NotTo(o.HaveOccurred())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d0aa467c-6784-4051-9399-0bf4b917a873
|
getZoneOfInstanceFromGcp
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getZoneOfInstanceFromGcp(oc *exutil.CLI, infraID string, workerName string) (string, error) {
zone, err := getgcloudClient(oc).GetZone(infraID, workerName)
e2e.Logf("zone for instance %v is: %s", workerName, zone)
return zone, err
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
1cb2f960-2e5e-42bd-8979-d9ca40f847c9
|
startInstanceOnGcp
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func startInstanceOnGcp(oc *exutil.CLI, nodeName string, zone string) error {
err := getgcloudClient(oc).StartInstance(nodeName, zone)
return err
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
99c49949-b8d7-46e2-87d6-3870cdcaced8
|
stopInstanceOnGcp
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func stopInstanceOnGcp(oc *exutil.CLI, nodeName string, zone string) error {
err := getgcloudClient(oc).StopInstance(nodeName, zone)
return err
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
9d082534-0b75-4914-b8af-5ed59fd9a429
|
accessEgressNodeFromIntSvcInstanceOnGCP
|
['"fmt"', '"os"', '"golang.org/x/crypto/ssh"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func accessEgressNodeFromIntSvcInstanceOnGCP(host string, IPaddr string) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_GCP_USER")
if user == "" {
user = "core"
}
cmd := fmt.Sprintf(`timeout 5 bash -c "</dev/tcp/%v/22"`, IPaddr)
err := sshRunCmd(host, user, cmd)
if err != nil {
e2e.Logf("Failed to run %v: %v", cmd, err)
// Extract the return code from the ssh.ExitError
if returnedErr, ok := err.(*ssh.ExitError); ok {
return fmt.Sprintf("%d", returnedErr.ExitStatus()), err
}
// IO problems, the return code was not sent back
return "", err
}
return "0", nil
}
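
// Illustrative usage sketch (an assumption, not part of the original suite):
// the returned string is the remote command's exit code, so "0" means port 22
// on the egress node is reachable from the int-svc host.
func exampleEgressNodeReachableFromGCP(host, nodeIP string) bool {
rc, err := accessEgressNodeFromIntSvcInstanceOnGCP(host, nodeIP)
return err == nil && rc == "0"
}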
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
70934c83-32e7-4cdc-b61d-8b344205ecb6
|
startInstanceOnAWS
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func startInstanceOnAWS(a *exutil.AwsClient, hostname string) {
instanceID, err := a.GetAwsInstanceIDFromHostname(hostname)
o.Expect(err).NotTo(o.HaveOccurred())
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
state, err := a.GetAwsInstanceState(instanceID)
if err != nil {
e2e.Logf("%v", err)
return false, nil
}
if state == "running" {
e2e.Logf("The instance is running")
return true, nil
}
if state == "stopped" {
err = a.StartInstance(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be started.", state)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance %s is not in a state from which it can be started.", hostname))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
f31f828e-59ed-46f2-bf26-b94b7b58eeda
|
stopInstanceOnAWS
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func stopInstanceOnAWS(a *exutil.AwsClient, hostname string) {
instanceID, err := a.GetAwsInstanceIDFromHostname(hostname)
o.Expect(err).NotTo(o.HaveOccurred())
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
state, err := a.GetAwsInstanceState(instanceID)
if err != nil {
e2e.Logf("%v", err)
return false, nil
}
if state == "stopped" {
e2e.Logf("The instance is already stopped.")
return true, nil
}
if state == "running" {
err = a.StopInstance(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be stopped.", state)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance %s is not in a state from which it can be stopped.", hostname))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
223b500c-43c7-4be1-aa4e-6c3afab02a8e
|
accessEgressNodeFromIntSvcInstanceOnAWS
|
['"fmt"', '"os"', '"golang.org/x/crypto/ssh"']
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func accessEgressNodeFromIntSvcInstanceOnAWS(a *exutil.AwsClient, oc *exutil.CLI, IPaddr string) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_AWS_USER")
if user == "" {
user = "core"
}
sshkey := os.Getenv("SSH_CLOUD_PRIV_KEY")
if sshkey == "" {
sshkey = "../internal/config/keys/openshift-qe.pem"
}
ips := getAwsIntSvcIPs(a, oc)
publicIP, ok := ips["publicIP"]
if !ok {
return "", fmt.Errorf("no public IP found for Int Svc instance")
}
cmd := fmt.Sprintf(`timeout 5 bash -c "</dev/tcp/%v/22"`, IPaddr)
sshClient := exutil.SshClient{User: user, Host: publicIP, Port: 22, PrivateKey: sshkey}
err := sshClient.Run(cmd)
if err != nil {
e2e.Logf("Failed to run %v: %v", cmd, err)
// Extract the return code from the ssh.ExitError
if returnedErr, ok := err.(*ssh.ExitError); ok {
return fmt.Sprintf("%d", returnedErr.ExitStatus()), err
}
// IO problems, the return code was not sent back
return "", err
}
return "0", nil
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
5ab29c4d-1fcb-480c-a8b4-73854b555e93
|
findIP
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func findIP(input string) []string {
numBlock := "(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])"
regexPattern := numBlock + "\\." + numBlock + "\\." + numBlock + "\\." + numBlock
regEx := regexp.MustCompile(regexPattern)
return regEx.FindAllString(input, -1)
}
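
// Illustrative usage sketch (an assumption, not part of the original suite):
// findIP is typically paired with unique() below to de-duplicate addresses
// scraped from tcpdump-style output. The log line here is made up.
func exampleExtractUniqueIPs() []string {
log := "IP 10.0.0.1.36678 > 10.0.0.2.9095: Flags [S]; retry from 10.0.0.1"
return unique(findIP(log)) // ["10.0.0.1", "10.0.0.2"]
}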
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7a605734-34e3-4f28-819a-b5af7a5690d5
|
unique
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func unique(s []string) []string {
inResult := make(map[string]bool)
var result []string
for _, str := range s {
if _, ok := inResult[str]; !ok {
inResult[str] = true
result = append(result, str)
}
}
return result
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
8eb0c77d-7d48-403f-8741-166a6729ee22
|
getAzureCredentialFromCluster
|
['"encoding/base64"', '"encoding/json"', '"os"']
|
['azureCredentials']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getAzureCredentialFromCluster(oc *exutil.CLI) error {
if exutil.CheckPlatform(oc) != "azure" {
g.Skip("it is not azure platform and can not get credential, and then skip it.")
}
credential, getSecErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o=jsonpath={.data}").Output()
if getSecErr != nil {
e2e.Logf("Cannot get credential from secret/azure-credentials with error : %v,", getSecErr)
return getSecErr
}
azureCreds := azureCredentials{}
unmarshalErr := json.Unmarshal([]byte(credential), &azureCreds)
if unmarshalErr != nil {
e2e.Logf("Unmarshal error : %v,", unmarshalErr)
return unmarshalErr
}
azureClientID, decodeACIDErr := base64.StdEncoding.DecodeString(azureCreds.AzureClientID)
if decodeACIDErr != nil {
e2e.Logf("Decode azureClientID error : %v ", decodeACIDErr)
return decodeACIDErr
}
azureClientSecret, decodeACSErr := base64.StdEncoding.DecodeString(azureCreds.AzureClientSecret)
if decodeACSErr != nil {
e2e.Logf("Decode azureClientSecret error: %v", decodeACSErr)
return decodeACSErr
}
azureSubscriptionID, decodeASIDErr := base64.StdEncoding.DecodeString(azureCreds.AzureSubscriptionID)
if decodeASIDErr != nil {
e2e.Logf("Decode azureSubscriptionID error: %v ", decodeASIDErr)
return decodeASIDErr
}
azureTenantID, decodeATIDErr := base64.StdEncoding.DecodeString(azureCreds.AzureTenantID)
if decodeATIDErr != nil {
e2e.Logf("Decode azureTenantID error : %v ", decodeATIDErr)
return decodeATIDErr
}
os.Setenv("AZURE_CLIENT_ID", string(azureClientID))
os.Setenv("AZURE_CLIENT_SECRET", string(azureClientSecret))
os.Setenv("AZURE_SUBSCRIPTION_ID", string(azureSubscriptionID))
os.Setenv("AZURE_TENANT_ID", string(azureTenantID))
e2e.Logf("Azure credentials successfully loaded.")
return nil
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
5e726566-6259-4586-8b69-4373e45561f0
|
getAzureResourceGroup
|
['"encoding/base64"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getAzureResourceGroup(oc *exutil.CLI) (string, error) {
if exutil.CheckPlatform(oc) != "azure" {
return "", fmt.Errorf("it is not azure platform and can not get resource group")
}
credential, getCredErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o=jsonpath={.data.azure_resourcegroup}").Output()
if getCredErr != nil {
e2e.Logf("Cannot get credential from secret/azure-credentials with error : %v,", getCredErr)
return "", getCredErr
}
azureResourceGroup, rgErr := base64.StdEncoding.DecodeString(credential)
if rgErr != nil {
e2e.Logf("Cannot get resource group, error: %v", rgErr)
return "", rgErr
}
return string(azureResourceGroup), nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
3e8433ee-2d04-4787-a3b6-669489ac2e90
|
isAzurePrivate
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func isAzurePrivate(oc *exutil.CLI) bool {
installConfig, err := runOcWithRetry(oc.AsAdmin(), "get", "cm", "cluster-config-v1", "-n", "kube-system", "-o=jsonpath={.data.install-config}")
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), "i/o timeout") {
e2e.Logf("System issues with err=%v\n)", err)
return true
}
e2e.Logf("\nTry to get cm cluster-config-v1, but failed with error: %v \n", err)
return false
}
if strings.Contains(installConfig, "publish: Internal") && strings.Contains(installConfig, "outboundType: Loadbalancer") {
e2e.Logf("This is Azure Private cluster.")
return true
}
return false
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
609b3b71-439e-474a-8997-02dc2ea19935
|
isAzureStack
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func isAzureStack(oc *exutil.CLI) bool {
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.ToLower(cloudName) == "azurestackcloud" {
e2e.Logf("This is Azure Stack cluster.")
return true
}
return false
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
8fe7752c-0914-4c59-9e30-4aa564815698
|
getAzureIntSvcResrouceGroup
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getAzureIntSvcResrouceGroup(oc *exutil.CLI) (string, error) {
azureResourceGroup, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.networkResourceGroupName}").Output()
if err != nil {
e2e.Logf("Cannot get resource group, error: %v", err)
return "", err
}
return azureResourceGroup, nil
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
3407357c-8f92-4fc5-b080-fbac987df76e
|
getAzureIntSvcVMPrivateIP
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getAzureIntSvcVMPrivateIP(oc *exutil.CLI, sess *exutil.AzureSession, rg string) (string, error) {
privateIP := ""
clusterPrefixName := exutil.GetClusterPrefixName(oc)
vmName := clusterPrefixName + "-int-svc"
privateIP, getPrivateIPErr := exutil.GetAzureVMPrivateIP(sess, rg, vmName)
if getPrivateIPErr != nil {
e2e.Logf("Cannot get private IP from int svc vm, error: %v", getPrivateIPErr)
return "", getPrivateIPErr
}
return privateIP, nil
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
e3bb1bc6-f6ca-424e-b4b6-2de70d27f691
|
getAzureIntSvcVMPublicIP
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getAzureIntSvcVMPublicIP(oc *exutil.CLI, sess *exutil.AzureSession, rg string) (string, error) {
publicIP := ""
clusterPrefixName := exutil.GetClusterPrefixName(oc)
vmName := clusterPrefixName + "-int-svc"
publicIP, getPublicIPErr := exutil.GetAzureVMPublicIP(sess, rg, vmName)
if getPublicIPErr != nil {
e2e.Logf("Cannot get public IP from int svc vm, error: %v", getPublicIPErr)
return "", getPublicIPErr
}
return publicIP, nil
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
38907cac-0e51-4efc-b4f0-1cf7ca68fb4d
|
installIPEchoServiceOnAzure
|
['"net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func installIPEchoServiceOnAzure(oc *exutil.CLI, sess *exutil.AzureSession, rg string) (string, error) {
user := "core"
sshkey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
command := "sudo netstat -ntlp | grep 9095 || sudo podman run --name ipecho -d -p 9095:80 quay.io/openshifttest/ip-echo:1.2.0"
e2e.Logf("Run command, %s \n", command)
privateIP, privateIPErr := getAzureIntSvcVMPrivateIP(oc, sess, rg)
if privateIPErr != nil || privateIP == "" {
return "", privateIPErr
}
publicIP, publicIPErr := getAzureIntSvcVMPublicIP(oc, sess, rg)
if publicIPErr != nil || publicIP == "" {
return "", publicIPErr
}
sshClient := exutil.SshClient{User: user, Host: publicIP, Port: 22, PrivateKey: sshkey}
err = sshClient.Run(command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, err)
return "", err
}
ipEchoURL := net.JoinHostPort(privateIP, "9095")
return ipEchoURL, nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
9f17dbf0-6979-4f6f-82fd-ee6d2d420e13
|
accessEgressNodeFromIntSvcInstanceOnAzure
|
['"fmt"', '"os"', '"golang.org/x/crypto/ssh"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func accessEgressNodeFromIntSvcInstanceOnAzure(sess *exutil.AzureSession, oc *exutil.CLI, rg string, IPaddr string) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_AZURE_USER")
if user == "" {
user = "core"
}
sshkey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
publicIP, publicIPErr := getAzureIntSvcVMPublicIP(oc, sess, rg)
if publicIPErr != nil || publicIP == "" {
return "", publicIPErr
}
cmd := fmt.Sprintf(`timeout 5 bash -c "</dev/tcp/%v/22"`, IPaddr)
sshClient := exutil.SshClient{User: user, Host: publicIP, Port: 22, PrivateKey: sshkey}
err = sshClient.Run(cmd)
if err != nil {
e2e.Logf("Failed to run %v: %v", cmd, err)
// Extract the return code from the ssh.ExitError
if returnedErr, ok := err.(*ssh.ExitError); ok {
return fmt.Sprintf("%d", returnedErr.ExitStatus()), err
}
// IO problems, the return code was not sent back
return "", err
}
return "0", nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
8e00fde7-0ad2-4f4e-bea7-b6f2668803cc
|
runOcWithRetry
|
['"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func runOcWithRetry(oc *exutil.CLI, cmd string, args ...string) (string, error) {
var err error
var output string
maxRetries := 5
for numRetries := 0; numRetries < maxRetries; numRetries++ {
if numRetries > 0 {
e2e.Logf("Retrying oc command (retry count=%v/%v)", numRetries+1, maxRetries)
}
output, err = oc.Run(cmd).Args(args...).Output()
// If an error was found, either return the error, or retry if a timeout error was found.
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), "i/o timeout") {
// Retry on "i/o timeout" errors
e2e.Logf("Warning: oc command encountered i/o timeout.\nerr=%v\n)", err)
continue
}
return output, err
}
// Break out of loop if no error.
break
}
return output, err
}
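
// Illustrative usage sketch (an assumption, not part of the original suite):
// runOcWithRetry mirrors oc.Run but tolerates transient API-server i/o
// timeouts. The daemonset name and namespace are placeholders.
func exampleGetDsReadyCount(oc *exutil.CLI) (string, error) {
return runOcWithRetry(oc.AsAdmin(), "get", "ds", "tcpdump-sniffer", "-n", "sniffer-ns", "-ojsonpath={.status.numberReady}")
}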
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c26f893e-0a8a-4136-a902-26b51793cb36
|
createSnifferDaemonset
|
['"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['tcpdumpDaemonSet']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func createSnifferDaemonset(oc *exutil.CLI, ns, dsName, nodeLabel, labelKey, dstHost, phyInf string, dstPort int) (tcpDS *tcpdumpDaemonSet, err error) {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
tcpdumpDSTemplate := filepath.Join(buildPruningBaseDir, "tcpdump-daemonset-template.yaml")
_, err = runOcWithRetry(oc.AsAdmin().WithoutNamespace(), "adm", "policy", "add-scc-to-user", "privileged", fmt.Sprintf("system:serviceaccount:%s:default", ns))
o.Expect(err).NotTo(o.HaveOccurred())
tcpdumpDS := tcpdumpDaemonSet{
name: dsName,
template: tcpdumpDSTemplate,
namespace: ns,
nodeLabel: nodeLabel,
labelKey: labelKey,
phyInterface: phyInf,
dstPort: dstPort,
dstHost: dstHost,
}
dsErr := tcpdumpDS.createTcpdumpDS(oc)
if dsErr != nil {
return &tcpdumpDS, dsErr
}
platform := exutil.CheckPlatform(oc)
// Due to slowness associated with OpenStack cluster through PSI, add a little wait time before checking tcpdumpDS for OSP
if platform == "openstack" {
time.Sleep(30 * time.Second)
}
dsReadyErr := waitDaemonSetReady(oc, ns, tcpdumpDS.name)
if dsReadyErr != nil {
return &tcpdumpDS, dsReadyErr
}
return &tcpdumpDS, nil
}
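
// Illustrative usage sketch (an assumption, not part of the original suite):
// discover the node's physical interface, start the sniffer daemonset, and
// clean it up when done. The daemonset name, label value/key, and port are
// placeholders.
func exampleSnifferLifecycle(oc *exutil.CLI, ns, nodeName, dstHost string) error {
phyInf, infErr := getSnifPhyInf(oc, nodeName)
if infErr != nil {
return infErr
}
ds, dsErr := createSnifferDaemonset(oc, ns, "tcpdump-sniffer", "blue", "color", dstHost, phyInf, 80)
if dsErr != nil {
return dsErr
}
defer deleteTcpdumpDS(oc, ds.name, ds.namespace)
// ... generate traffic and inspect the sniffer logs here ...
return nil
}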
|
networking
| |||
function
|
openshift/openshift-tests-private
|
ef8deb2b-97f2-4578-ac73-ec41f437cd06
|
waitDaemonSetReady
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func waitDaemonSetReady(oc *exutil.CLI, ns, dsName string) error {
desiredNumStr, scheduledErr := runOcWithRetry(oc.AsAdmin(), "get", "ds", dsName, "-n", ns, "-ojsonpath={.status.desiredNumberScheduled}")
if scheduledErr != nil {
return fmt.Errorf("Cannot get DesiredNumberScheduled for daemonset :%s", dsName)
}
desiredNum, convertErr := strconv.Atoi(desiredNumStr)
o.Expect(convertErr).NotTo(o.HaveOccurred())
dsErr := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
readyNumStr, readyErr := runOcWithRetry(oc.AsAdmin(), "get", "ds", dsName, "-n", ns, "-ojsonpath={.status.numberReady}")
o.Expect(readyErr).NotTo(o.HaveOccurred())
readyNum, convertErr := strconv.Atoi(readyNumStr)
o.Expect(convertErr).NotTo(o.HaveOccurred())
if desiredNum != readyNum || readyNum == 0 || desiredNum == 0 {
e2e.Logf("The DesiredNumberScheduled for daemonset %s is %v, ready number is %v, wait for next try.", dsName, desiredNum, readyNum)
return false, nil
}
e2e.Logf("The DesiredNumberScheduled for daemonset %s is %v, ready number is %v.", dsName, desiredNum, readyNum)
return true, nil
})
if dsErr != nil {
return fmt.Errorf("The daemonset :%s is not ready", dsName)
}
return nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
06e566cd-4b5e-48e6-a7a5-1d69b31345fc
|
checkMatchedIPs
|
['"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func checkMatchedIPs(oc *exutil.CLI, ns, dsName string, searchString, expectedIP string, match bool) error {
e2e.Logf("Expected egressIP hit egress node logs : %v", match)
matchErr := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
foundIPs, searchErr := getSnifferLogs(oc, ns, dsName, searchString)
o.Expect(searchErr).NotTo(o.HaveOccurred())
_, ok := foundIPs[expectedIP]
// Expect there are matched IPs
if match && !ok {
e2e.Logf("Waiting for the logs to be synced, try next round.")
return false, nil
}
//Expect there is no matched IP
if !match && ok {
e2e.Logf("Waiting for the logs to be synced, try next round.")
return false, nil
}
return true, nil
})
e2e.Logf("Checking expected result in tcpdump log got error message as: %v.", matchErr)
return matchErr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
58445089-a5c7-439d-af27-cfa2afd45f5e
|
getSnifferLogs
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getSnifferLogs(oc *exutil.CLI, ns, dsName, searchString string) (map[string]int, error) {
snifferPods := getPodName(oc, ns, "name="+dsName)
var snifLogs string
for _, pod := range snifferPods {
log, err := runOcWithRetry(oc.AsAdmin(), "logs", pod, "-n", ns)
if err != nil {
return nil, err
}
snifLogs += "\n" + log
}
var ip string
snifferLogs := strings.Split(snifLogs, "\n")
matchedIPs := make(map[string]int)
if strings.TrimSpace(snifLogs) == "" {
e2e.Logf("No new log generated!")
return matchedIPs, nil
}
for _, line := range snifferLogs {
if !strings.Contains(line, searchString) {
continue
}
e2e.Logf("Try to find the source ip in this log line:\n %v", line)
matchLineSlice := strings.Fields(line)
ipPortSlice := strings.Split(matchLineSlice[9], ".")
e2e.Logf("%s", matchLineSlice[9])
ip = strings.Join(ipPortSlice[:len(ipPortSlice)-1], ".")
e2e.Logf("Found source ip %s in this log line.", ip)
matchedIPs[ip]++
}
return matchedIPs, nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d98bfab1-0274-4fb6-a1b4-be5ffaee6ba8
|
getRequestURL
|
['"fmt"', '"net/url"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getRequestURL(domainName string) (string, string) {
randomStr := getRandomString()
url := fmt.Sprintf("curl -s http://%s/?request=%s --connect-timeout 5", domainName, randomStr)
return randomStr, url
}
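
// Illustrative usage sketch (an assumption, not part of the original suite):
// the random string returned by getRequestURL is the needle used when
// grepping the sniffer logs, so every probe is uniquely traceable.
func exampleProbeOnce(oc *exutil.CLI, podNS, podName, dstHost, tcpdumpNS, tcpdumpName, egressIP string) error {
randomStr, reqURL := getRequestURL(dstHost)
if _, err := e2eoutput.RunHostCmd(podNS, podName, reqURL); err != nil {
return err
}
return checkMatchedIPs(oc, tcpdumpNS, tcpdumpName, randomStr, egressIP, true)
}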
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
aac21878-1368-4022-98fb-988c93be55f5
|
waitCloudPrivateIPconfigUpdate
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func waitCloudPrivateIPconfigUpdate(oc *exutil.CLI, egressIP string, exist bool) {
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "baremetal") || strings.Contains(platform, "vsphere") || strings.Contains(platform, "nutanix") {
e2e.Logf("Baremetal and Vsphere platform don't have cloudprivateipconfig, no need check cloudprivateipconfig!")
} else {
egressipErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
e2e.Logf("Wait for cloudprivateipconfig updated,expect %s exist: %v.", egressIP, exist)
output, err := runOcWithRetry(oc.AsAdmin(), "get", "cloudprivateipconfig", egressIP)
e2e.Logf(output)
if exist && err == nil && strings.Contains(output, egressIP) {
return true, nil
}
if !exist && err != nil && strings.Contains(output, "NotFound") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(egressipErr, "cloudprivateipconfig was not updated as expected!")
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d1cd0cf6-cf0f-4bc9-bd5a-0a6775272c5e
|
getSnifPhyInf
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getSnifPhyInf(oc *exutil.CLI, nodeName string) (string, error) {
var phyInf string
ifaceErr2 := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 15*time.Second, false, func(cxt context.Context) (bool, error) {
ifaceList2, ifaceErr := exutil.DebugNodeWithChroot(oc, nodeName, "nmcli", "con", "show")
if ifaceErr != nil {
e2e.Logf("Debug node Error: %v", ifaceErr)
return false, nil
}
e2e.Logf("%s", ifaceList2)
infList := strings.Split(ifaceList2, "\n")
for _, inf := range infList {
if strings.Contains(inf, "ovs-if-phys0") {
phyInf = strings.Fields(inf)[3]
}
}
return true, nil
})
return phyInf, ifaceErr2
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7806291e-55f5-4ba2-a5ff-17fffb014178
|
nslookDomainName
|
['"net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func nslookDomainName(domainName string) string {
ips, err := net.LookupIP(domainName)
o.Expect(err).NotTo(o.HaveOccurred())
for _, ip := range ips {
if ip.To4() != nil {
return ip.String()
}
}
e2e.Logf("There is no IPv4 address for destination domain %s", domainName)
return ""
}
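
// Illustrative usage sketch (an assumption, not part of the original suite):
// resolve a destination domain to an IPv4 address once, then verify the
// egressIP shows up in the tcpdump capture towards it. The domain is a
// placeholder.
func exampleVerifyEgressViaTcpdump(oc *exutil.CLI, pod, podNS, egressIP, tcpdumpNS, tcpdumpName string) error {
dstHost := nslookDomainName("www.example.com")
o.Expect(dstHost).NotTo(o.BeEmpty())
return verifyEgressIPinTCPDump(oc, pod, podNS, egressIP, dstHost, tcpdumpNS, tcpdumpName, true)
}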
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
feca4663-4039-4553-bc12-fc3904b259bb
|
verifyEgressIPinTCPDump
|
['"net/url"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func verifyEgressIPinTCPDump(oc *exutil.CLI, pod, podNS, expectedEgressIP, dstHost, tcpdumpNS, tcpdumpName string, expectedOrNot bool) error {
egressipErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
randomStr, url := getRequestURL(dstHost)
_, err := e2eoutput.RunHostCmd(podNS, pod, url)
if checkMatchedIPs(oc, tcpdumpNS, tcpdumpName, randomStr, expectedEgressIP, expectedOrNot) != nil || err != nil {
e2e.Logf("Expected to find egressIP in tcpdump is: %v, did not get expected result in tcpdump log, try next round.", expectedOrNot)
return false, nil
}
return true, nil
})
return egressipErr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
e6476787-f885-46ca-8a57-ed513e9f4477
|
GetName
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func (i *instance) GetName() string {
return i.nodeName
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
99525c59-870d-4cb7-be58-7233c46fb1b9
|
OspCredentials
|
['"encoding/base64"', '"os"', '"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func OspCredentials(oc *exutil.CLI) {
credentials, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/openstack-credentials", "-n", "kube-system", "-o", `jsonpath={.data.clouds\.yaml}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
credential, err := base64.StdEncoding.DecodeString(credentials)
o.Expect(err).NotTo(o.HaveOccurred())
var (
username string
password string
projectID string
authURL string
userDomainName string
regionName string
projectName string
)
credVars := []string{"auth_url", "username", "password", "project_id", "user_domain_name", "region_name", "project_name"}
for _, s := range credVars {
r := regexp.MustCompile(s + ":.*")
match := r.FindAllString(string(credential), -1)
// Skip keys that are absent from the decoded clouds.yaml.
if len(match) == 0 {
continue
}
if strings.Contains(s, "username") {
username = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_USERNAME", username)
}
if strings.Contains(s, "password") {
password = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_PASSWORD", password)
}
if strings.Contains(s, "auth_url") {
authURL = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_AUTH_URL", authURL)
}
if strings.Contains(s, "project_id") {
projectID = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_PROJECT_ID", projectID)
}
if strings.Contains(s, "user_domain_name") {
userDomainName = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_USER_DOMAIN_NAME", userDomainName)
}
if strings.Contains(s, "region_name") {
regionName = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_REGION_NAME", regionName)
}
if strings.Contains(s, "project_name") {
projectName = strings.Split(match[0], " ")[1]
os.Setenv("OSP_DR_PROJECT_NAME", projectName)
}
}
}
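
// Illustrative usage sketch (an assumption, not part of the original suite):
// after OspCredentials runs, the parsed values are consumed through the
// OSP_DR_* environment variables it sets.
func exampleOspAuthURL(oc *exutil.CLI) string {
OspCredentials(oc)
return os.Getenv("OSP_DR_AUTH_URL")
}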
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
d7c62e1d-9f51-4add-87fd-2e65c9195240
|
VsphereCloudClient
|
['"encoding/base64"', '"fmt"', '"net/url"', '"os/exec"', '"strings"', '"github.com/tidwall/gjson"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"', '"github.com/vmware/govmomi"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func VsphereCloudClient(oc *exutil.CLI) (*exutil.Vmware, *govmomi.Client) {
credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/vsphere-creds", "-n", "kube-system", "-o", `jsonpath={.data}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
output := gjson.Parse(credential).Value().(map[string]interface{})
var accessKeyIDBase64 string
var secureKeyBase64 string
for key, value := range output {
if strings.Contains(key, "username") {
accessKeyIDBase64 = fmt.Sprint(value)
} else if strings.Contains(key, "password") {
secureKeyBase64 = fmt.Sprint(value)
}
}
accessKeyID, err1 := base64.StdEncoding.DecodeString(accessKeyIDBase64)
o.Expect(err1).NotTo(o.HaveOccurred())
secureKey, err2 := base64.StdEncoding.DecodeString(secureKeyBase64)
o.Expect(err2).NotTo(o.HaveOccurred())
cloudConfig, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/cloud-provider-config", "-n", "openshift-config", "-o", `jsonpath={.data.config}`).OutputToFile("vsphere.ini")
o.Expect(err3).NotTo(o.HaveOccurred())
cmd := fmt.Sprintf(`grep -i server "%v" | awk -F '"' '{print $2}'`, cloudConfig)
serverURL, err4 := exec.Command("bash", "-c", cmd).Output()
e2e.Logf("\n serverURL: %s \n", string(serverURL))
o.Expect(err4).NotTo(o.HaveOccurred())
envUsername := string(accessKeyID)
envPassword := string(secureKey)
envURL := string(serverURL)
envURL = strings.TrimSuffix(envURL, "\n")
encodedPassword := url.QueryEscape(envPassword)
govmomiURL := fmt.Sprintf("https://%s:%s@%s/sdk", envUsername, encodedPassword, envURL)
vmware := exutil.Vmware{GovmomiURL: govmomiURL}
return vmware.Login()
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
bfef1c6b-c626-4e28-885a-7704e76bfda5
|
startVMOnAzure
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func startVMOnAzure(az *exutil.AzureSession, nodeName, rg string) {
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
vmState, stateErr := exutil.GetAzureVMInstanceState(az, nodeName, rg)
if stateErr != nil {
e2e.Logf("%v", stateErr)
return false, nil
}
if strings.EqualFold(vmState, "poweredOn") || strings.EqualFold(vmState, "running") || strings.EqualFold(vmState, "active") || strings.EqualFold(vmState, "ready") {
e2e.Logf("The instance has been started with state:%s !", vmState)
return true, nil
}
if strings.EqualFold(vmState, "poweredOff") || strings.EqualFold(vmState, "stopped") || strings.EqualFold(vmState, "paused") || strings.EqualFold(vmState, "notready") {
e2e.Logf("Start instance %s\n", nodeName)
_, err := exutil.StartAzureVM(az, nodeName, rg)
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be started.", vmState)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance %s is not in a state from which it can be started.", nodeName))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
189042c6-a8c6-4ff1-b447-991ec03ff3a6
|
stopVMOnAzure
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func stopVMOnAzure(az *exutil.AzureSession, nodeName, rg string) {
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
vmState, stateErr := exutil.GetAzureVMInstanceState(az, nodeName, rg)
if stateErr != nil {
e2e.Logf("%v", stateErr)
return false, nil
}
if strings.EqualFold(vmState, "poweredoff") || strings.EqualFold(vmState, "stopped") || strings.EqualFold(vmState, "stopping") || strings.EqualFold(vmState, "paused") || strings.EqualFold(vmState, "pausing") || strings.EqualFold(vmState, "deallocated") || strings.EqualFold(vmState, "notready") {
e2e.Logf("The instance %s has been stopped already, and now is with state:%s !", nodeName, vmState)
return true, nil
}
if strings.EqualFold(vmState, "poweredOn") || strings.EqualFold(vmState, "running") || strings.EqualFold(vmState, "active") || strings.EqualFold(vmState, "ready") {
e2e.Logf("Stop instance %s\n", nodeName)
_, err := exutil.StopAzureVM(az, nodeName, rg)
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be stopped.", vmState)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance %s is not in a state from which it can be stopped.", nodeName))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
8f84d041-581a-46b7-a917-0bb4b09f8fd9
|
verifyEgressIPWithIPEcho
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func verifyEgressIPWithIPEcho(oc *exutil.CLI, podNS, podName, ipEchoURL string, hit bool, expectedIPs ...string) {
timeout := estimateTimeoutForEgressIP(oc)
if hit {
egressErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
sourceIP, err := e2eoutput.RunHostCmd(podNS, podName, "curl -s "+ipEchoURL+" --connect-timeout 5")
if err != nil {
e2e.Logf("error,%v", err)
return false, nil
}
if !contains(expectedIPs, sourceIP) {
e2e.Logf("Not expected IP,soure IP is %s", sourceIP)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressErr, fmt.Sprintf("sourceIP was not included in %v", expectedIPs))
} else {
egressErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
sourceIP, err := e2eoutput.RunHostCmd(podNS, podName, "curl -s "+ipEchoURL+" --connect-timeout 5")
if err != nil {
e2e.Logf("error,%v", err)
return false, nil
}
if contains(expectedIPs, sourceIP) {
e2e.Logf("Not expected IP,soure IP is %s", sourceIP)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressErr, fmt.Sprintf("sourceIP was still included in %v", expectedIPs))
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
ee5b926e-989a-480f-9e44-2e4cecef7d08
|
verifyExpectedEIPNumInEIPObject
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func verifyExpectedEIPNumInEIPObject(oc *exutil.CLI, egressIPObject string, expectedNumber int) {
timeout := estimateTimeoutForEgressIP(oc)
egressErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
egressIPMaps1 := getAssignedEIPInEIPObject(oc, egressIPObject)
if len(egressIPMaps1) != expectedNumber {
e2e.Logf("Current EgressIP object length is %v,but expected is %v \n", len(egressIPMaps1), expectedNumber)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(egressErr, fmt.Sprintf("Failed to get expected number egressIPs %v", expectedNumber))
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
35357c67-00fd-4374-b1a8-8cb237d4bd3a
|
estimateTimeoutForEgressIP
|
['"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func estimateTimeoutForEgressIP(oc *exutil.CLI) time.Duration {
// https://bugzilla.redhat.com/show_bug.cgi?id=2105801#c8
// https://issues.redhat.com/browse/OCPBUGS-684
// Due to the above two bugs, Azure and OpenStack are much slower for an egressIP to take effect after configuration.
timeout := 100 * time.Second
platform := exutil.CheckPlatform(oc)
if strings.Contains(platform, "azure") || strings.Contains(platform, "openstack") {
timeout = 210 * time.Second
}
return timeout
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
52cc84d8-6e30-4e4b-bf71-07ff4ca8c60e
|
GetBmhNodeMachineConfig
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func GetBmhNodeMachineConfig(oc *exutil.CLI, nodeName string) (string, error) {
providerIDOutput, bmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o", `jsonpath='{.spec.providerID}'`).Output()
o.Expect(bmhErr).NotTo(o.HaveOccurred())
bmh := strings.Split(providerIDOutput, "/")[4]
e2e.Logf("\n The baremetal host for the node is: %v\n", bmh)
return bmh, bmhErr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c3d77605-09d0-4dfd-9ba5-19385b9d3a3e
|
stopVMOnIPIBM
|
['"encoding/json"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func stopVMOnIPIBM(oc *exutil.CLI, nodeName string) error {
stopErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
vmInstance, err := GetBmhNodeMachineConfig(oc, nodeName)
if err != nil {
return false, nil
}
e2e.Logf("\n\n\n vmInstance for the node is: %v \n\n\n", vmInstance)
patch := `[{"op": "replace", "path": "/spec/online", "value": false}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", "openshift-machine-api", vmInstance, "--type=json", "-p", patch).Execute()
if patchErr != nil {
return false, nil
}
return true, nil
})
e2e.Logf("Not able to stop %s, got error: %v.", nodeName, stopErr)
return stopErr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
158ca97d-b072-4e9d-98a1-5ead2b01067b
|
startVMOnIPIBM
|
['"encoding/json"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func startVMOnIPIBM(oc *exutil.CLI, nodeName string) error {
startErr := wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) {
vmInstance, err := GetBmhNodeMachineConfig(oc, nodeName)
if err != nil {
return false, nil
}
e2e.Logf("\n\n\n vmInstance for the node is: %v \n\n\n", vmInstance)
patch := `[{"op": "replace", "path": "/spec/online", "value": true}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", "openshift-machine-api", vmInstance, "--type=json", "-p", patch).Execute()
if patchErr != nil {
return false, nil
}
return true, nil
})
e2e.Logf("Not able to start %s, got error: %v.", nodeName, startErr)
return startErr
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
96c5ac57-a806-4517-8405-8cd0d4310ed8
|
specialPlatformCheck
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func specialPlatformCheck(oc *exutil.CLI) bool {
platform := exutil.CheckPlatform(oc)
specialPlatform := false
e2e.Logf("Check credential in kube-system to see if this cluster is a special STS cluster.")
switch platform {
case "aws":
credErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secrets", "-n", "kube-system", "aws-creds").Execute()
if credErr != nil {
specialPlatform = true
}
case "gcp":
credErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secrets", "-n", "kube-system", "gcp-credentials").Execute()
if credErr != nil {
specialPlatform = true
}
case "azure":
credErr := getAzureCredentialFromCluster(oc)
if credErr != nil {
specialPlatform = true
}
default:
e2e.Logf("Skip this check for other platforms that do not have special STS scenario.")
}
return specialPlatform
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
bf29334f-6b27-4700-812e-8ebfc4b97c38
|
getProxyIP
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getProxyIP(oc *exutil.CLI) string {
httpProxy, err := runOcWithRetry(oc.AsAdmin(), "get", "proxy", "cluster", "-o=jsonpath={.status.httpProxy}")
o.Expect(err).NotTo(o.HaveOccurred())
re := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`)
proxyIPs := re.FindAllString(httpProxy, -1)
if len(proxyIPs) == 0 {
return ""
}
return proxyIPs[0]
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
924b0c10-03f7-4ee6-9160-fdffb5534459
|
getIPechoURLFromUPIPrivateVlanBM
|
['"net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getIPechoURLFromUPIPrivateVlanBM(oc *exutil.CLI) string {
if checkProxy(oc) {
proxyIP := getProxyIP(oc)
if proxyIP == "" {
return ""
}
ipEchoURL := net.JoinHostPort(proxyIP, "9095")
workNode, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
_, curlErr := exutil.DebugNode(oc, workNode, "curl", "-s", ipEchoURL, "--connect-timeout", "5")
if curlErr == nil {
return ipEchoURL
}
}
return ""
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7f7a5ce1-134d-44eb-9ecd-a20e72382098
|
getClusterNetworkInfo
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func getClusterNetworkInfo(oc *exutil.CLI) (string, string) {
clusterNetworkInfoString, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network", "cluster", "-o=jsonpath={.spec.clusterNetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// match out network CIDR and hostPrefix
pattern := regexp.MustCompile(`\d+\.\d+\.\d+\.\d+\/\d+|\d+`)
clusterNetworkInfo := pattern.FindAllString(clusterNetworkInfoString, 2)
networkCIDR := clusterNetworkInfo[0]
hostPrefix := clusterNetworkInfo[1]
e2e.Logf("network CIDR: %v; hostPrefix: %v", networkCIDR, hostPrefix)
return networkCIDR, hostPrefix
}
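
// Illustrative usage sketch (an assumption, not part of the original suite):
// derive how many pod addresses each node's subnet holds from the returned
// hostPrefix, e.g. hostPrefix 23 yields 1<<(32-23) = 512 addresses.
func examplePodIPsPerNode(oc *exutil.CLI) int {
_, hostPrefix := getClusterNetworkInfo(oc)
prefix, err := strconv.Atoi(hostPrefix)
o.Expect(err).NotTo(o.HaveOccurred())
return 1 << (32 - prefix)
}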
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
4b20de42-d143-460c-9e04-4b95434c9f66
|
startInstanceOnNutanix
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func startInstanceOnNutanix(nutanix *exutil.NutanixClient, hostname string) {
instanceID, err := nutanix.GetNutanixVMUUID(hostname)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The instance %s UUID is :%s", hostname, instanceID)
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
state, err := nutanix.GetNutanixVMState(instanceID)
if err != nil {
e2e.Logf("Failed to get instance state %s, Error: %v", hostname, err)
return false, nil
}
if state == "ON" {
e2e.Logf("The instance %s is already running", hostname)
return true, nil
}
if state == "OFF" {
err = nutanix.ChangeNutanixVMState(instanceID, "ON")
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be started.", state)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance %s is not in a state from which it can be started.", hostname))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
b484bdd9-9a64-45fe-8955-a4f7fb0326fa
|
stopInstanceOnNutanix
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func stopInstanceOnNutanix(nutanix *exutil.NutanixClient, hostname string) {
instanceID, err := nutanix.GetNutanixVMUUID(hostname)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The instance %s UUID is :%s", hostname, instanceID)
stateErr := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
state, err := nutanix.GetNutanixVMState(instanceID)
if err != nil {
e2e.Logf("Failed to get instance state %s, Error: %v", hostname, err)
return false, nil
}
if state == "OFF" {
e2e.Logf("The instance is already stopped.")
return true, nil
}
if state == "ON" {
err = nutanix.ChangeNutanixVMState(instanceID, "OFF")
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
}
e2e.Logf("The instance is in %v,not in a state from which it can be stopped.", state)
return false, nil
})
exutil.AssertWaitPollNoErr(stateErr, fmt.Sprintf("The instance %s is not in a state from which it can be stopped.", hostname))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
bf6eef56-ec26-44f7-8823-b538d082a1fd
|
checkDisconnect
|
['"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func checkDisconnect(oc *exutil.CLI) bool {
workNode, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
curlCMD := "curl -I ifconfig.me --connect-timeout 5"
output, err := exutil.DebugNode(oc, workNode, "bash", "-c", curlCMD)
if !strings.Contains(output, "HTTP") || err != nil {
e2e.Logf("Unable to access the public Internet from the cluster.")
return true
}
e2e.Logf("Successfully connected to the public Internet from the cluster.")
return false
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
89483c86-8d74-4960-abb6-b936fd1b13b6
|
newIBMPowerInstance
|
['ibmPowerVsInstance', 'instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func newIBMPowerInstance(oc *exutil.CLI, clientPowerVs *exutil.IBMPowerVsSession, ibmRegion, ibmVpcName, nodeName string) *ibmPowerVsInstance {
return &ibmPowerVsInstance{
instance: instance{
nodeName: nodeName,
oc: oc,
},
clientPowerVs: clientPowerVs,
ibmRegion: ibmRegion,
ibmVpcName: ibmVpcName,
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
223d2d7c-86cd-49a1-8d63-e4ba6a11b1c2
|
Start
|
['ibmPowerVsInstance', 'instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func (ibmPws *ibmPowerVsInstance) Start() error {
instanceID, status, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
e2e.Logf("\n The ibmPowervs instance %s is currently in state: %s \n", ibmPws.nodeName, status)
if status == "active" {
e2e.Logf("The node is already in active state, no need to start it again\n")
return nil
}
return exutil.PerformInstanceActionOnPowerVs(ibmPws.clientPowerVs, instanceID, "start")
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
c9b4a8e0-7291-42dd-ba06-67321ab7e9dc
|
Stop
|
['ibmPowerVsInstance', 'instance']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func (ibmPws *ibmPowerVsInstance) Stop() error {
instanceID, status, idErr := exutil.GetIBMPowerVsInstanceInfo(ibmPws.clientPowerVs, ibmPws.nodeName)
o.Expect(idErr).NotTo(o.HaveOccurred())
e2e.Logf("\n The ibmPowervs instance %s is currently in state: %s \n", ibmPws.nodeName, status)
if status == "shutoff" {
e2e.Logf("The node is already in shutoff state, no need to stop it again\n")
return nil
}
return exutil.PerformInstanceActionOnPowerVs(ibmPws.clientPowerVs, instanceID, "stop")
}
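
// Illustrative usage sketch (an assumption, not part of the original suite):
// reboot a PowerVS worker through the wrapper; the region and VPC name are
// placeholders.
func exampleRebootPowerVsNode(oc *exutil.CLI, sess *exutil.IBMPowerVsSession, nodeName string) error {
node := newIBMPowerInstance(oc, sess, "us-south", "example-vpc", nodeName)
if stopErr := node.Stop(); stopErr != nil {
return stopErr
}
return node.Start()
}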
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
a95ffa36-5c05-45cf-b357-623df81a839c
|
cfgRouteOnExternalHost
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func cfgRouteOnExternalHost(oc *exutil.CLI, host string, user string, pod string, ns string, externalIntf string) bool {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, pod, "-o=jsonpath={.spec.nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeIp := getNodeIPv4(oc, ns, nodeName)
podIP := getPodIPv4(oc, ns, pod)
routeCmd := "ip route add " + podIP + " via " + nodeIp + " dev " + externalIntf
err = sshRunCmd(host, user, routeCmd)
if err != nil {
e2e.Logf("send command %v failed with error info %v", routeCmd, err)
return false
}
return true
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
61275ca9-6ba3-4c5f-b374-490c5c61128e
|
rmRouteOnExternalHost
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func rmRouteOnExternalHost(oc *exutil.CLI, host string, user string, pod string, ns string) {
var chkRes bool
podIP := getPodIPv4(oc, ns, pod)
routeCmd := "ip route delete " + podIP + " && " + "ip route"
ipRoute := podIP + "/32"
outPut, err := sshRunCmdOutPut(host, user, routeCmd)
if err != nil || strings.Contains(outPut, ipRoute) {
e2e.Logf("send command %v fail with error info %v", routeCmd, err)
chkRes = false
} else {
e2e.Logf("successfully removed the ip route %v, %v", podIP, outPut)
chkRes = true
}
o.Expect(chkRes).To(o.BeTrue())
}
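The two route helpers are designed as an add/cleanup pair; a minimal sketch (host, user, and interface values are hypothetical):
// Hypothetical pairing: add a host route to the pod on the external host,
// run the traffic check, then remove the route in a deferred cleanup.
if cfgRouteOnExternalHost(oc, "10.0.0.5", "core", podName, ns, "eth0") {
defer rmRouteOnExternalHost(oc, "10.0.0.5", "core", podName, ns)
// ... send traffic from the external host to the pod IP ...
}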
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
078e09ff-7596-4561-9627-cac259f3dabf
|
sshRunCmdOutPut
|
['"os"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/cloud_util.go
|
func sshRunCmdOutPut(host string, user string, cmd string) (string, error) {
privateKey := os.Getenv("SSH_CLOUD_PRIV_KEY")
if privateKey == "" {
privateKey = "../internal/config/keys/openshift-qe.pem"
}
sshClient := exutil.SshClient{User: user, Host: host, Port: 22, PrivateKey: privateKey}
return sshClient.RunOutput(cmd)
}
|
networking
| ||||
file
|
openshift/openshift-tests-private
|
12cdedfd-ee0c-4ea2-850d-911737c2ca66
|
egressqos_util
|
import (
"fmt"
"net"
"os"
"regexp"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
package networking
import (
"fmt"
"net"
"os"
"regexp"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type egressQosResource struct {
name string
namespace string
tempfile string
kind string
}
type networkingRes struct {
name string
namespace string
tempfile string
kind string
}
// create networking resource
func (rs *networkingRes) create(oc *exutil.CLI, parameters ...string) {
paras := []string{"-f", rs.tempfile, "--ignore-unknown-parameters=true", "-p"}
for _, para := range parameters {
paras = append(paras, para)
}
exutil.ApplyNsResourceFromTemplate(oc, rs.namespace, paras...)
}
// delete egressqos resource
func (rs *egressQosResource) delete(oc *exutil.CLI) {
e2e.Logf("delete %s %s in namespace %s", rs.kind, rs.name, rs.namespace)
oc.AsAdmin().WithoutNamespace().Run("delete").Args(rs.kind, rs.name, "-n", rs.namespace, "--ignore-not-found=true").Execute()
}
// create egressqos resource
func (rs *egressQosResource) create(oc *exutil.CLI, parameters ...string) {
paras := []string{"-f", rs.tempfile, "--ignore-unknown-parameters=true", "-p"}
for _, para := range parameters {
paras = append(paras, para)
}
exutil.ApplyNsResourceFromTemplate(oc, rs.namespace, paras...)
}
// create egressqos resource with output
func (rs *egressQosResource) createWithOutput(oc *exutil.CLI, parameters ...string) (string, error) {
var configFile string
cmd := []string{"-f", rs.tempfile, "--ignore-unknown-parameters=true", "-p"}
for _, para := range parameters {
cmd = append(cmd, para)
}
e2e.Logf("parameters list is %s\n", cmd)
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(cmd...).OutputToFile(getRandomString() + "config.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v resource: %v", rs.kind, cmd))
e2e.Logf("the file of resource is %s\n", configFile)
output, err1 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile, "-n", rs.namespace).Output()
return output, err1
}
func runSSHCmdOnAWS(host string, cmd string) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_AWS_USER")
if user == "" {
user = "core"
}
sshkey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
sshClient := exutil.SshClient{User: user, Host: host, Port: 22, PrivateKey: sshkey}
return sshClient.RunOutput(cmd)
}
func installDscpServiceOnAWS(a *exutil.AwsClient, oc *exutil.CLI, publicIP string) error {
command := "sudo netstat -ntlp | grep 9096 || sudo podman run --name dscpecho -d -p 9096:8080 quay.io/openshifttest/hello-sdn@sha256:2af5b5ec480f05fda7e9b278023ba04724a3dd53a296afcd8c13f220dec52197"
e2e.Logf("Run command %s", command)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
return err
}
updateAwsIntSvcSecurityRule(a, oc, 9096)
return nil
}
func startTcpdumpOnDscpService(a *exutil.AwsClient, oc *exutil.CLI, publicIP string, pktfile string) {
// start tcpdump inside the dscpecho container, writing captures to pktfile
tcpdumpCmd := "'tcpdump tcp -c 5 -vvv -i eth0 -n and dst port 8080 > '" + pktfile
command := "sudo podman exec -d dscpecho bash -c " + tcpdumpCmd
e2e.Logf("Run command %s", command)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func chkDSCPinPkts(a *exutil.AwsClient, oc *exutil.CLI, publicIP string, pktfile string, dscp int) bool {
command := "sudo podman exec -- dscpecho cat " + fmt.Sprintf("%s", pktfile)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
return false
}
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Captured packets are %s", outPut)
tosHex := dscpDecConvertToHex(dscp)
dscpString := "tos 0x" + tosHex
if !strings.Contains(outPut, dscpString) {
e2e.Logf("Captured packets doesn't contain dscp value %s", dscpString)
return false
}
e2e.Logf("Captured packets contains dscp value %s", dscpString)
return true
}
func chkDSCPandEIPinPkts(a *exutil.AwsClient, oc *exutil.CLI, publicIP string, pktfile string, dscp int, egressip string) bool {
command := "sudo podman exec -- dscpecho cat " + fmt.Sprintf("%s", pktfile)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
return false
}
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Captured packets are %s", outPut)
tosHex := dscpDecConvertToHex(dscp)
dscpString := "tos 0x" + tosHex
if !strings.Contains(outPut, dscpString) {
e2e.Logf("Captured packets doesn't contain dscp value %s", dscpString)
return false
}
if !strings.Contains(outPut, egressip) {
e2e.Logf("Captured packets doesn't contain egressip %s", egressip)
return false
}
e2e.Logf("Captured packets contains dscp value %s or egressip %v", dscpString, egressip)
return true
}
func rmPktsFile(a *exutil.AwsClient, oc *exutil.CLI, publicIP string, pktfile string) {
command := "sudo podman exec -- dscpecho rm " + fmt.Sprintf("%s", pktfile)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func dscpDecConvertToHex(dscp int) string {
tosInt := dscp * 4
tosHex := fmt.Sprintf("%x", tosInt)
e2e.Logf("The dscp hex value is %v", tosHex)
return tosHex
}
func startCurlTraffic(oc *exutil.CLI, ns string, pod string, dstip string, dstport string) {
e2e.Logf("start curl traffic")
dstURL := net.JoinHostPort(dstip, dstport)
cmd := "curl -k " + dstURL
outPut, err := exutil.RemoteShPodWithBash(oc, ns, pod, cmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(outPut).Should(o.ContainSubstring("Hello OpenShift"))
}
func chkEgressQosStatus(oc *exutil.CLI, ns string) {
nodeList, err := exutil.GetAllNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
outPut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressqos", "default", "-n", ns, "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, nodeName := range nodeList {
subString := "Ready-In-Zone-" + nodeName
o.Expect(strings.Contains(outPut, subString)).To(o.BeTrue())
}
}
func getEgressQosAddSet(oc *exutil.CLI, node string, ns string) []string {
// get the ovnkube pod running on this node
podName, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", node)
o.Expect(err).NotTo(o.HaveOccurred())
nsFilter := "external-ids:k8s.ovn.org/name=" + ns
output, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", "openshift-ovn-kubernetes", podName, "ovn-nbctl", "find", "address_set",
"external-ids:k8s.ovn.org/owner-type=EgressQoS", nsFilter).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("The egressqos addresset output is %v", output)
re := regexp.MustCompile(`\"(\d+.\d+.\d+.\d+)\"`)
addrList := re.FindAllString(output, -1)
e2e.Logf("The ip addresses which matched egressqos rules are %v", addrList)
return addrList
}
func chkAddSet(oc *exutil.CLI, podname string, ns string, iplist []string, expect bool) {
podIP := getPodIPv4(oc, ns, podname)
re := regexp.MustCompile(podIP)
ipStr := strings.Join(iplist, " ")
matchRes := re.MatchString(ipStr)
if expect {
o.Expect(matchRes).To(o.BeTrue())
} else {
o.Expect(matchRes).To(o.BeFalse())
}
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
8a666793-c72b-4ac9-8f64-a47f0ff8559c
|
create
|
['networkingRes']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func (rs *networkingRes) create(oc *exutil.CLI, parameters ...string) {
paras := []string{"-f", rs.tempfile, "--ignore-unknown-parameters=true", "-p"}
for _, para := range parameters {
paras = append(paras, para)
}
exutil.ApplyNsResourceFromTemplate(oc, rs.namespace, paras...)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
87d4fc18-8608-47e0-9f9b-e8db91475949
|
delete
|
['egressQosResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func (rs *egressQosResource) delete(oc *exutil.CLI) {
e2e.Logf("delete %s %s in namespace %s", rs.kind, rs.name, rs.namespace)
oc.AsAdmin().WithoutNamespace().Run("delete").Args(rs.kind, rs.name, "-n", rs.namespace, "--ignore-not-found=true").Execute()
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
89baabd5-0c52-499c-bc6e-38c30f6cde7f
|
create
|
['egressQosResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func (rs *egressQosResource) create(oc *exutil.CLI, parameters ...string) {
paras := []string{"-f", rs.tempfile, "--ignore-unknown-parameters=true", "-p"}
for _, para := range parameters {
paras = append(paras, para)
}
exutil.ApplyNsResourceFromTemplate(oc, rs.namespace, paras...)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
92a7548c-80f5-4c68-ba6b-ae9ca6be46b3
|
createWithOutput
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['egressQosResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func (rs *egressQosResource) createWithOutput(oc *exutil.CLI, parameters ...string) (string, error) {
var configFile string
cmd := []string{"-f", rs.tempfile, "--ignore-unknown-parameters=true", "-p"}
for _, para := range parameters {
cmd = append(cmd, para)
}
e2e.Logf("parameters list is %s\n", cmd)
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(cmd...).OutputToFile(getRandomString() + "config.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v resource: %v", rs.kind, cmd))
e2e.Logf("the file of resource is %s\n", configFile)
output, err1 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile, "-n", rs.namespace).Output()
return output, err1
}
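Unlike create, createWithOutput returns the apply output and error, which makes it suitable for negative tests; a minimal sketch (the parameter names and error substring are hypothetical):
// Hypothetical negative test: an out-of-range DSCP value should be
// rejected by the EgressQoS CRD validation, so assert on the apply output.
output, err := egressQos.createWithOutput(oc, "NAME=default", "NAMESPACE="+ns, "DSCP=99")
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("Invalid value"))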
|
networking
| |||
function
|
openshift/openshift-tests-private
|
1a8e9b3e-5d2d-4088-b531-ffc48e119bef
|
runSSHCmdOnAWS
|
['"os"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func runSSHCmdOnAWS(host string, cmd string) (string, error) {
user := os.Getenv("SSH_CLOUD_PRIV_AWS_USER")
if user == "" {
user = "core"
}
sshkey, err := exutil.GetPrivateKey()
o.Expect(err).NotTo(o.HaveOccurred())
sshClient := exutil.SshClient{User: user, Host: host, Port: 22, PrivateKey: sshkey}
return sshClient.RunOutput(cmd)
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
16ce9149-c5cd-4fc9-be17-d73884bd790e
|
installDscpServiceOnAWS
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func installDscpServiceOnAWS(a *exutil.AwsClient, oc *exutil.CLI, publicIP string) error {
command := "sudo netstat -ntlp | grep 9096 || sudo podman run --name dscpecho -d -p 9096:8080 quay.io/openshifttest/hello-sdn@sha256:2af5b5ec480f05fda7e9b278023ba04724a3dd53a296afcd8c13f220dec52197"
e2e.Logf("Run command %s", command)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
return err
}
updateAwsIntSvcSecurityRule(a, oc, 9096)
return nil
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
fc1f44d8-bc95-4581-b60e-d5123a61bd90
|
startTcpdumpOnDscpService
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func startTcpdumpOnDscpService(a *exutil.AwsClient, oc *exutil.CLI, publicIP string, pktfile string) {
// start tcpdump inside the dscpecho container, writing captures to pktfile
tcpdumpCmd := "'tcpdump tcp -c 5 -vvv -i eth0 -n and dst port 8080 > '" + pktfile
command := "sudo podman exec -d dscpecho bash -c " + tcpdumpCmd
e2e.Logf("Run command %s", command)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
1dda5f7f-897f-493b-9ffb-f9fa4d255544
|
chkDSCPinPkts
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func chkDSCPinPkts(a *exutil.AwsClient, oc *exutil.CLI, publicIP string, pktfile string, dscp int) bool {
command := "sudo podman exec -- dscpecho cat " + fmt.Sprintf("%s", pktfile)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
return false
}
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Captured packets are %s", outPut)
tosHex := dscpDecConvertToHex(dscp)
dscpString := "tos 0x" + tosHex
if !strings.Contains(outPut, dscpString) {
e2e.Logf("Captured packets doesn't contain dscp value %s", dscpString)
return false
}
e2e.Logf("Captured packets contains dscp value %s", dscpString)
return true
}
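Since the capture file is written asynchronously by tcpdump, callers typically poll this checker rather than call it once; a sketch (hypothetical wrapper, reusing the wait helpers already imported in this file):
// Hypothetical polling wrapper: retry the packet check until the capture
// file contains the expected DSCP marking or the timeout expires.
chkErr := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
return chkDSCPinPkts(a, oc, publicIP, pktfile, dscp), nil
})
exutil.AssertWaitPollNoErr(chkErr, "expected DSCP value never appeared in captured packets")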
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
be614534-1add-42da-a2e8-3cb2576b1430
|
chkDSCPandEIPinPkts
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func chkDSCPandEIPinPkts(a *exutil.AwsClient, oc *exutil.CLI, publicIP string, pktfile string, dscp int, egressip string) bool {
command := "sudo podman exec -- dscpecho cat " + fmt.Sprintf("%s", pktfile)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
return false
}
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Captured packets are %s", outPut)
tosHex := dscpDecConvertToHex(dscp)
dscpString := "tos 0x" + tosHex
if !strings.Contains(outPut, dscpString) {
e2e.Logf("Captured packets doesn't contain dscp value %s", dscpString)
return false
}
if !strings.Contains(outPut, egressip) {
e2e.Logf("Captured packets doesn't contain egressip %s", egressip)
return false
}
e2e.Logf("Captured packets contains dscp value %s or egressip %v", dscpString, egressip)
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
dcfcc3d7-867e-48a3-b596-d994bc932921
|
rmPktsFile
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func rmPktsFile(a *exutil.AwsClient, oc *exutil.CLI, publicIP string, pktfile string) {
command := "sudo podman exec -- dscpecho rm " + fmt.Sprintf("%s", pktfile)
outPut, err := runSSHCmdOnAWS(publicIP, command)
if err != nil {
e2e.Logf("Failed to run %v: %v", command, outPut)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
6d080c37-752e-4783-aa02-b0f3b079ee9c
|
dscpDecConvertToHex
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func dscpDecConvertToHex(dscp int) string {
tosInt := dscp * 4
tosHex := fmt.Sprintf("%x", tosInt)
e2e.Logf("The dscp hex value is %v", tosHex)
return tosHex
}
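The conversion works because the 6-bit DSCP field occupies the upper bits of the 8-bit ToS byte, so ToS = DSCP << 2, which is the * 4 above; for example, DSCP 46 (Expedited Forwarding) gives ToS 0xb8, exactly the "tos 0xb8" string tcpdump prints. A tiny illustrative check (hypothetical, not part of the suite):
// DSCP 46 (Expedited Forwarding): 46 * 4 = 184 = 0xb8.
o.Expect(dscpDecConvertToHex(46)).To(o.Equal("b8"))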
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
14de035b-a309-40a2-acc7-5b77fa09089f
|
startCurlTraffic
|
['"net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func startCurlTraffic(oc *exutil.CLI, ns string, pod string, dstip string, dstport string) {
e2e.Logf("start curl traffic")
dstURL := net.JoinHostPort(dstip, dstport)
cmd := "curl -k " + dstURL
outPut, err := exutil.RemoteShPodWithBash(oc, ns, pod, cmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(outPut).Should(o.ContainSubstring("Hello OpenShift"))
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
2ca51000-7d28-4fd0-a90b-71f4f5d3d2fe
|
chkEgressQosStatus
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func chkEgressQosStatus(oc *exutil.CLI, ns string) {
nodeList, err := exutil.GetAllNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
outPut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("egressqos", "default", "-n", ns, "-o", "yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, nodeName := range nodeList {
subString := "Ready-In-Zone-" + nodeName
o.Expect(strings.Contains(outPut, subString)).To(o.BeTrue())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
341c3d7a-0778-4ee5-9995-18b25cdb445a
|
getEgressQosAddSet
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func getEgressQosAddSet(oc *exutil.CLI, node string, ns string) []string {
// get the ovnkube pod running on this node
podName, err := exutil.GetPodName(oc, "openshift-ovn-kubernetes", "app=ovnkube-node", node)
o.Expect(err).NotTo(o.HaveOccurred())
nsFilter := "external-ids:k8s.ovn.org/name=" + ns
output, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", "openshift-ovn-kubernetes", podName, "ovn-nbctl", "find", "address_set",
"external-ids:k8s.ovn.org/owner-type=EgressQoS", nsFilter).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("The egressqos addresset output is %v", output)
re := regexp.MustCompile(`\"(\d+.\d+.\d+.\d+)\"`)
addrList := re.FindAllString(output, -1)
e2e.Logf("The ip addresses which matched egressqos rules are %v", addrList)
return addrList
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
2f89d87a-2f29-4ddf-8ac0-3aaa8af377b3
|
chkAddSet
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/egressqos_util.go
|
func chkAddSet(oc *exutil.CLI, podname string, ns string, iplist []string, expect bool) {
podIP := getPodIPv4(oc, ns, podname)
re := regexp.MustCompile(podIP)
ipStr := strings.Join(iplist, " ")
matchRes := re.MatchString(ipStr)
if expect {
o.Expect(matchRes).To(o.BeTrue())
} else {
o.Expect(matchRes).To(o.BeFalse())
}
}
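A typical pairing with getEgressQosAddSet (sketch; the pod and node names are hypothetical):
// Hypothetical check: the matching pod's IP must appear in the EgressQoS
// address set on its node, and an unmatched pod's IP must not.
addrList := getEgressQosAddSet(oc, nodeName, ns)
chkAddSet(oc, "matched-pod", ns, addrList, true)
chkAddSet(oc, "unmatched-pod", ns, addrList, false)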
|
networking
| ||||
file
|
openshift/openshift-tests-private
|
9db9f09c-f969-4479-b924-b9d10dcc2618
|
infw_util
|
import (
"fmt"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
package networking
import (
"fmt"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type infwCResource struct {
name string
primary_inf string
nodelabel string
src_cidr1 string
protocol_1 string
protocoltype1 string
range_1 string
action_1 string
protocol_2 string
protocoltype2 string
range_2 string
action_2 string
template string
}
type infwCResource_multiple_cidr struct {
name string
primary_inf string
nodelabel string
src_cidr1 string
src_cidr2 string
protocol_1 string
protocoltype1 string
range_1 string
action_1 string
protocol_2 string
protocoltype2 string
range_2 string
action_2 string
template string
}
type infwCResource_icmp struct {
name string
primary_inf string
nodelabel string
src_cidr string
action_1 string
action_2 string
template string
}
type infwConfigResource struct {
namespace string
nodelabel string
template string
}
func (infw *infwCResource) createinfwCR(oc *exutil.CLI) {
g.By("Creating infw CR from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", infw.template, "-p", "NAME="+infw.name, "PRIMARY_INF="+infw.primary_inf, "NODELABEL="+infw.nodelabel, "SRC_CIDR1="+infw.src_cidr1, "PROTOCOL_1="+infw.protocol_1, "PROTOCOLTYPE1="+infw.protocoltype1, "RANGE_1="+infw.range_1, "ACTION_1="+infw.action_1, "PROTOCOL_2="+infw.protocol_2, "PROTOCOLTYPE2="+infw.protocoltype2, "RANGE_2="+infw.range_2, "ACTION_2="+infw.action_2)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create infw CR %v", infw.name))
}
func (infw_multiple_cidr *infwCResource_multiple_cidr) createinfwCR_multiple_cidr(oc *exutil.CLI) {
g.By("Creating infw CR from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", infw_multiple_cidr.template, "-p", "NAME="+infw_multiple_cidr.name, "PRIMARY_INF="+infw_multiple_cidr.primary_inf, "NODELABEL="+infw_multiple_cidr.nodelabel, "SRC_CIDR1="+infw_multiple_cidr.src_cidr1, "SRC_CIDR2="+infw_multiple_cidr.src_cidr2, "PROTOCOL_1="+infw_multiple_cidr.protocol_1, "PROTOCOLTYPE1="+infw_multiple_cidr.protocoltype1, "RANGE_1="+infw_multiple_cidr.range_1, "ACTION_1="+infw_multiple_cidr.action_1, "PROTOCOLTYPE2="+infw_multiple_cidr.protocoltype2, "PROTOCOL_2="+infw_multiple_cidr.protocol_2, "RANGE_2="+infw_multiple_cidr.range_2, "ACTION_2="+infw_multiple_cidr.action_2)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create infw CR %v", infw_multiple_cidr.name))
}
func (infwcfg *infwConfigResource) createinfwConfig(oc *exutil.CLI) {
g.By("Creating infw config from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", infwcfg.template, "-p", "NAMESPACE="+infwcfg.namespace, "NODELABEL="+infwcfg.nodelabel)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create infw Config Resource"))
}
func (infwICMP *infwCResource_icmp) createinfwICMP(oc *exutil.CLI) {
g.By("Creating infw ICMP from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", infwICMP.template, "-p", "NAME="+infwICMP.name, "PRIMARY_INF="+infwICMP.primary_inf, "NODELABEL="+infwICMP.nodelabel, "SRC_CIDR="+infwICMP.src_cidr, "ACTION_2="+infwICMP.action_2, "ACTION_1="+infwICMP.action_1)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create infw ICMP"))
}
func deleteinfwCR(oc *exutil.CLI, cr string) {
e2e.Logf("delete %s in namespace %s", "openshift-ingress-node-firewall", cr)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("IngressNodeFirewall", cr).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func deleteinfwCfg(oc *exutil.CLI) {
e2e.Logf("deleting ingressnodefirewallconfig in namespace openshift-ingress-node-firewall")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("IngressNodeFirewallConfig", "ingressnodefirewallconfig", "-n", "openshift-ingress-node-firewall").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func restartInfwDaemons(oc *exutil.CLI) {
e2e.Logf("Restarting ingress node firewall daemons in namespace openshift-ingress-node-firewall")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-l=app=ingress-node-firewall-daemon", "-n", "openshift-ingress-node-firewall").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, "openshift-ingress-node-firewall", "app=ingress-node-firewall-daemon")
exutil.AssertWaitPollNoErr(err, "Ingress node firewall daemons not ready")
}
func getinfwDaemonForNode(oc *exutil.CLI, nodeName string) string {
infwDaemon, err := exutil.GetPodName(oc, "openshift-ingress-node-firewall", "app=ingress-node-firewall-daemon", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
return infwDaemon
}
func waitforInfwDaemonsready(oc *exutil.CLI) {
err := waitForPodWithLabelReady(oc, "openshift-ingress-node-firewall", "app=ingress-node-firewall-daemon")
exutil.AssertWaitPollNoErr(err, "Ingress node firewall daemons not ready")
}
|
package networking
| ||||
function
|
openshift/openshift-tests-private
|
1a39f81e-11ac-44cb-8459-64f8a7d4662f
|
createinfwCR
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['infwCResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func (infw *infwCResource) createinfwCR(oc *exutil.CLI) {
g.By("Creating infw CR from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", infw.template, "-p", "NAME="+infw.name, "PRIMARY_INF="+infw.primary_inf, "NODELABEL="+infw.nodelabel, "SRC_CIDR1="+infw.src_cidr1, "PROTOCOL_1="+infw.protocol_1, "PROTOCOLTYPE1="+infw.protocoltype1, "RANGE_1="+infw.range_1, "ACTION_1="+infw.action_1, "PROTOCOL_2="+infw.protocol_2, "PROTOCOLTYPE2="+infw.protocoltype2, "RANGE_2="+infw.range_2, "ACTION_2="+infw.action_2)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create infw CR %v", infw.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
c2c11a69-a1b9-4ab2-8fcf-a676fb848a20
|
createinfwCR_multiple_cidr
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['infwCResource_multiple_cidr']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func (infw_multiple_cidr *infwCResource_multiple_cidr) createinfwCR_multiple_cidr(oc *exutil.CLI) {
g.By("Creating infw CR from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", infw_multiple_cidr.template, "-p", "NAME="+infw_multiple_cidr.name, "PRIMARY_INF="+infw_multiple_cidr.primary_inf, "NODELABEL="+infw_multiple_cidr.nodelabel, "SRC_CIDR1="+infw_multiple_cidr.src_cidr1, "SRC_CIDR2="+infw_multiple_cidr.src_cidr2, "PROTOCOL_1="+infw_multiple_cidr.protocol_1, "PROTOCOLTYPE1="+infw_multiple_cidr.protocoltype1, "RANGE_1="+infw_multiple_cidr.range_1, "ACTION_1="+infw_multiple_cidr.action_1, "PROTOCOLTYPE2="+infw_multiple_cidr.protocoltype2, "PROTOCOL_2="+infw_multiple_cidr.protocol_2, "RANGE_2="+infw_multiple_cidr.range_2, "ACTION_2="+infw_multiple_cidr.action_2)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create infw CR %v", infw_multiple_cidr.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
9c654150-93f8-42b4-9474-c36eecb54d57
|
createinfwConfig
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['infwConfigResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func (infwcfg *infwConfigResource) createinfwConfig(oc *exutil.CLI) {
g.By("Creating infw config from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", infwcfg.template, "-p", "NAMESPACE="+infwcfg.namespace, "NODELABEL="+infwcfg.nodelabel)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create infw Config Resource"))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
87e31ec1-a5ea-4d55-82b6-756385d55cb0
|
createinfwICMP
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['infwCResource_icmp']
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func (infwICMP *infwCResource_icmp) createinfwICMP(oc *exutil.CLI) {
g.By("Creating infw ICMP from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", infwICMP.template, "-p", "NAME="+infwICMP.name, "PRIMARY_INF="+infwICMP.primary_inf, "NODELABEL="+infwICMP.nodelabel, "SRC_CIDR="+infwICMP.src_cidr, "ACTION_2="+infwICMP.action_2, "ACTION_1="+infwICMP.action_1)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create infw ICMP"))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
e05efb87-ad9f-4d16-82ed-c7cf28bb7476
|
deleteinfwCR
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func deleteinfwCR(oc *exutil.CLI, cr string) {
e2e.Logf("delete %s in namespace %s", "openshift-ingress-node-firewall", cr)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("IngressNodeFirewall", cr).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
614d244b-5cf8-4a9a-9f89-e78573cb6039
|
deleteinfwCfg
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func deleteinfwCfg(oc *exutil.CLI) {
e2e.Logf("deleting ingressnodefirewallconfig in namespace openshift-ingress-node-firewall")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("IngressNodeFirewallConfig", "ingressnodefirewallconfig", "-n", "openshift-ingress-node-firewall").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
5f18b961-90a9-4b54-9eb3-8018bfad1bbf
|
restartInfwDaemons
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func restartInfwDaemons(oc *exutil.CLI) {
e2e.Logf("Restarting ingress node firewall daemons in namespace openshift-ingress-node-firewall")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-l=app=ingress-node-firewall-daemon", "-n", "openshift-ingress-node-firewall").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = waitForPodWithLabelReady(oc, "openshift-ingress-node-firewall", "app=ingress-node-firewall-daemon")
exutil.AssertWaitPollNoErr(err, "Ingress node firewall daemons not ready")
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
a0b22dcc-0864-4c44-aa3f-fc30bfac4b63
|
getinfwDaemonForNode
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func getinfwDaemonForNode(oc *exutil.CLI, nodeName string) string {
infwDaemon, err := exutil.GetPodName(oc, "openshift-ingress-node-firewall", "app=ingress-node-firewall-daemon", nodeName)
o.Expect(err).NotTo(o.HaveOccurred())
return infwDaemon
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
93470396-b7ca-4831-b474-63e874950817
|
waitforInfwDaemonsready
|
github.com/openshift/openshift-tests-private/test/extended/networking/infw_util.go
|
func waitforInfwDaemonsready(oc *exutil.CLI) {
err := waitForPodWithLabelReady(oc, "openshift-ingress-node-firewall", "app=ingress-node-firewall-daemon")
exutil.AssertWaitPollNoErr(err, "Ingress node firewall daemons not ready")
}
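Taken together, the helpers above support the usual ingress node firewall test flow; a condensed sketch (template path, CR name, and node label are hypothetical):
// Hypothetical end-to-end flow: deploy the firewall config, wait for the
// daemons, create a rule CR, and clean both up afterwards.
infwCfg := infwConfigResource{namespace: "openshift-ingress-node-firewall", nodelabel: "node-role.kubernetes.io/worker", template: cfgTemplate}
defer deleteinfwCfg(oc)
infwCfg.createinfwConfig(oc)
waitforInfwDaemonsready(oc)
defer deleteinfwCR(oc, "infw-block-ssh")
// ... build an infwCResource named "infw-block-ssh" and call createinfwCR ...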
|
networking
| |||||
test
|
openshift/openshift-tests-private
|
f411d1c0-b233-4016-af65-622501ec3e75
|
multinetworkpolicy
|
import (
"fmt"
"net"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
|
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
|
package networking
import (
"fmt"
"net"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-networking] SDN multinetworkpolicy", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("networking-multinetworkpolicy", exutil.KubeConfigPath())
g.BeforeEach(func() {
if checkProxy(oc) {
g.Skip("This is proxy cluster, skip the test.")
}
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", "console", "-n", "openshift-console").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if err != nil || !(strings.Contains(msg, "sriov.openshift-qe.sdn.com") || strings.Contains(msg, "offload.openshift-qe.sdn.com")) {
g.Skip("This case will only run on rdu1/rdu2 cluster. , skip for other envrionment!!!")
}
})
// author: [email protected]
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41168-MultiNetworkPolicy ingress allow same podSelector with same namespaceSelector. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "ingress-allow-same-podSelector-with-same-namespaceSelector.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41168a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41168b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod1ns1's secondary interface in first namespace.")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-1")
exutil.By("3. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("4. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("5. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("6. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("7. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("8. Create Ingress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ingress-allow-same-podselector-with-same-namespaceselector"))
exutil.By("9. Same curl testing, one curl pass and three curls will fail after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "red-pod-1", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-3", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "red-pod-2", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-2", pod1ns1IPv4, pod1ns1IPv6)
exutil.By("10. Delete ingress-allow-same-podselector-with-same-namespaceselector policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "ingress-allow-same-podselector-with-same-namespaceselector", "-n", ns1)
exutil.By("11. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
})
// author: [email protected]
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41169-MultiNetworkPolicy ingress allow diff podSelector with same namespaceSelector. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "ingress-allow-diff-podSelector-with-same-namespaceSelector.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41169a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41169b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod1ns1's secondary interface in first namespace.")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-1")
exutil.By("3. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("4. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("5. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("6. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("7. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("8. Create Ingress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ingress-allow-diff-podselector-with-same-namespaceselector"))
exutil.By("9. Same curl testing, one curl fail and three curls will pass after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-2", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "red-pod-1", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-3", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "red-pod-2", pod1ns1IPv4, pod1ns1IPv6)
exutil.By("10. Delete ingress-allow-diff-podselector-with-same-namespaceselector policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "ingress-allow-diff-podselector-with-same-namespaceselector", "-n", ns1)
exutil.By("11. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
})
// author: [email protected]
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41171-MultiNetworkPolicy egress allow same podSelector with same namespaceSelector. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "egress-allow-same-podSelector-with-same-namespaceSelector.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41171a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41171b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("3. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("4. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("5. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("7. Create egress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("egress-allow-same-podselector-with-same-namespaceselector"))
exutil.By("8. Same curl testing, one curl pass and three curls will fail after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
exutil.By("9. Delete egress-allow-same-podselector-with-same-namespaceselector policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "egress-allow-same-podselector-with-same-namespaceselector", "-n", ns1)
exutil.By("10. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
})
// author: [email protected]
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41172-MultiNetworkPolicy egress allow diff podSelector with same namespaceSelector. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "egress-allow-diff-podSelector-with-same-namespaceSelector.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41172a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41172b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("3. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("4. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("5. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("7. Create egress-allow-diff-podSelector-with-same-namespaceSelector policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("egress-allow-diff-podselector-with-same-namespaceselector"))
exutil.By("8. Same curl testing, one curl pass and three curls will fail after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("9. Delete egress-allow-diff-podselector-with-same-namespaceselector policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "egress-allow-diff-podselector-with-same-namespaceselector", "-n", ns1)
exutil.By("10. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
})
// author: [email protected]
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41170-MultiNetworkPolicy ingress ipblock. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
patchSResource := "networks.operator.openshift.io/cluster"
pingPodTemplate := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-pod-template.yaml")
netAttachDefFile := filepath.Join(buildPruningBaseDir, "ipblock-NAD.yaml")
policyFile := filepath.Join(buildPruningBaseDir, "ingress-ipBlock.yaml")
patchInfoTrue := "{\"spec\":{\"useMultiNetworkPolicy\":true}}"
patchInfoFalse := "{\"spec\":{\"useMultiNetworkPolicy\":false}}"
exutil.By("1. Enable MacvlanNetworkpolicy in the cluster")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
exutil.By("2. Create a namespace")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41170a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
exutil.By("3. Create MultiNetworkPolicy-NAD in ns1")
err1 := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns1).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
output, err2 := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns1).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ipblock-net"))
exutil.By("4. Create six pods for ip range policy testing")
pod1ns1 := testPodMultinetwork{
name: "blue-pod-1",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod1ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
pod2ns1 := testPodMultinetwork{
name: "blue-pod-2",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod2ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
pod3ns1 := testPodMultinetwork{
name: "blue-pod-3",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod3ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
pod4ns1 := testPodMultinetwork{
name: "blue-pod-4",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod4ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod4ns1.namespace, pod4ns1.name)
pod5ns1 := testPodMultinetwork{
name: "blue-pod-5",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod5ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod5ns1.namespace, pod5ns1.name)
pod6ns1 := testPodMultinetwork{
name: "blue-pod-6",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod6ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod6ns1.namespace, pod6ns1.name)
g.By("5. Get IPs from all six pod's secondary interfaces")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-1")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-3")
pod4ns1IPv4, pod4ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-4")
pod5ns1IPv4, pod5ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-5")
pod6ns1IPv4, pod6ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-6")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod6ns1IPv4, pod6ns1IPv6)
exutil.By("7. Create ingress-ipBlock policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err3 := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err3).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ingress-ipblock"))
exutil.By("8. Curl should fail after applying policy")
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-4", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-5", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-2", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-3", pod1ns1IPv4, pod1ns1IPv6)
exutil.By("9. Delete ingress-ipBlock policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "ingress-ipblock", "-n", ns1)
exutil.By("10. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod6ns1IPv4, pod6ns1IPv6)
})
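// Note: the six near-identical pod definitions in the test above could be
// generated in a loop. A minimal sketch, assuming the testPodMultinetwork
// type and the helpers used in this file:
//
//	for i := 1; i <= 6; i++ {
//		node := "worker-0"
//		if i > 3 {
//			node = "worker-1"
//		}
//		p := testPodMultinetwork{
//			name:      fmt.Sprintf("blue-pod-%d", i),
//			namespace: ns1,
//			nodename:  node,
//			nadname:   "ipblock-net",
//			labelname: "blue-openshift",
//			template:  pingPodTemplate,
//		}
//		p.createTestPodMultinetwork(oc)
//		waitPodReady(oc, p.namespace, p.name)
//	}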
// author: [email protected]
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41173-MultiNetworkPolicy egress ipblock. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
patchSResource := "networks.operator.openshift.io/cluster"
pingPodTemplate := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-pod-template.yaml")
netAttachDefFile := filepath.Join(buildPruningBaseDir, "ipblock-NAD.yaml")
policyFile := filepath.Join(buildPruningBaseDir, "egress-ipBlock.yaml")
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
exutil.By("1. Enable MacvlanNetworkpolicy in the cluster")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
exutil.By("2. Create a namespace")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41173a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
exutil.By("3. Create MultiNetworkPolicy-NAD in ns1")
policyErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns1).Execute()
o.Expect(policyErr).NotTo(o.HaveOccurred())
nadOutput, nadErr := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns1).Output()
o.Expect(nadErr).NotTo(o.HaveOccurred())
o.Expect(nadOutput).To(o.ContainSubstring("ipblock-net"))
exutil.By("4. Create six pods for egress ip range policy testing")
pod1ns1 := testPodMultinetwork{
name: "blue-pod-1",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod1ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
pod2ns1 := testPodMultinetwork{
name: "blue-pod-2",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod2ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod2ns1.namespace, pod2ns1.name)
pod3ns1 := testPodMultinetwork{
name: "blue-pod-3",
namespace: ns1,
nodename: "worker-0",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod3ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod3ns1.namespace, pod3ns1.name)
pod4ns1 := testPodMultinetwork{
name: "blue-pod-4",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod4ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod4ns1.namespace, pod4ns1.name)
pod5ns1 := testPodMultinetwork{
name: "blue-pod-5",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod5ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod5ns1.namespace, pod5ns1.name)
pod6ns1 := testPodMultinetwork{
name: "blue-pod-6",
namespace: ns1,
nodename: "worker-1",
nadname: "ipblock-net",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod6ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod6ns1.namespace, pod6ns1.name)
exutil.By("5. Get IPs from all six pod's secondary interfaces")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-1")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-3")
pod4ns1IPv4, pod4ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-4")
pod5ns1IPv4, pod5ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-5")
pod6ns1IPv4, pod6ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-6")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod6ns1IPv4, pod6ns1IPv6)
exutil.By("7. Create egress-ipBlock policy in ns1")
policyCreateErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
o.Expect(policyCreateErr).NotTo(o.HaveOccurred())
policyCreOutput, policyCreErr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(policyCreErr).NotTo(o.HaveOccurred())
o.Expect(policyCreOutput).To(o.ContainSubstring("egress-ipblock"))
exutil.By("8. curl should fail for ip range 192.168.0.4-192.168.0.6 after applying policy")
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-2", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-2", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-3", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-4", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockFail(oc, ns1, "blue-pod-5", pod6ns1IPv4, pod6ns1IPv6)
exutil.By("9. Delete egress-ipBlock policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "egress-ipblock", "-n", ns1)
exutil.By("10. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-1", pod6ns1IPv4, pod6ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod4ns1IPv4, pod4ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod5ns1IPv4, pod5ns1IPv6)
curlPod2PodMultiNetworkIPBlockPass(oc, ns1, "blue-pod-6", pod6ns1IPv4, pod6ns1IPv6)
})
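// The referenced egress-ipBlock.yaml fixture is not shown in this file. Based
// on the assertions above (pods at 192.168.0.4-192.168.0.6 become unreachable),
// a MultiNetworkPolicy of roughly this shape is assumed; the policy-for
// annotation binds it to the ipblock-net NAD, and the selector and CIDR values
// are illustrative:
//
//	apiVersion: k8s.cni.cncf.io/v1beta1
//	kind: MultiNetworkPolicy
//	metadata:
//	  name: egress-ipblock
//	  annotations:
//	    k8s.v1.cni.cncf.io/policy-for: ipblock-net
//	spec:
//	  podSelector: {}
//	  policyTypes:
//	  - Egress
//	  egress:
//	  - to:
//	    - ipBlock:
//	        cidr: 192.168.0.0/30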
// author: [email protected]
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41607-Multinetworkpolicy filter-with-tcpport [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
patchSResource := "networks.operator.openshift.io/cluster"
tcpportPod := filepath.Join(buildPruningBaseDir, "tcpport-pod.yaml")
netAttachDefFile := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-NAD1.yaml")
pingPodTemplate := filepath.Join(buildPruningBaseDir, "MultiNetworkPolicy-pod-template.yaml")
policyFile := filepath.Join(buildPruningBaseDir, "policy-tcpport.yaml")
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
exutil.By("1. Enable MacvlanNetworkpolicy in the cluster")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
patchResourceAsAdmin(oc, patchSResource, patchInfoTrue)
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
exutil.By("2. Create a namespace")
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns := "project41607"
defer oc.AsAdmin().Run("delete").Args("project", ns, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
exutil.By("3. Create MultiNetworkPolicy-NAD in ns")
policyErr := oc.AsAdmin().Run("create").Args("-f", netAttachDefFile, "-n", ns).Execute()
o.Expect(policyErr).NotTo(o.HaveOccurred())
nadOutput, nadErr := oc.AsAdmin().Run("get").Args("net-attach-def", "-n", ns).Output()
o.Expect(nadErr).NotTo(o.HaveOccurred())
o.Expect(nadOutput).To(o.ContainSubstring("macvlan-nad1"))
exutil.By("4. Create a tcpport pods for ingress tcp port testing")
createResourceFromFile(oc, ns, tcpportPod)
podErr := waitForPodWithLabelReady(oc, ns, "name=tcp-port-pod")
exutil.AssertWaitPollNoErr(podErr, "tcpportPod is not running")
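// getPodMultiNetwork returns the pod's secondary-interface (net1) IPv4 and
// IPv6 addresses; the IPv6 address is discarded since only IPv4 is curled here.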
podIPv4, _ := getPodMultiNetwork(oc, ns, "tcp-port-pod")
exutil.By("5. Create a test pods for ingress tcp port testing")
pod1ns1 := testPodMultinetwork{
name: "blue-pod-1",
namespace: ns,
nodename: "worker-1",
nadname: "macvlan-nad1",
labelname: "blue-openshift",
template: pingPodTemplate,
}
pod1ns1.createTestPodMultinetwork(oc)
waitPodReady(oc, pod1ns1.namespace, pod1ns1.name)
exutil.By("6. curl should pass before applying policy")
_, curl1Err := e2eoutput.RunHostCmd(ns, "blue-pod-1", "curl --connect-timeout 5 -s "+net.JoinHostPort(podIPv4, "8888"))
o.Expect(curl1Err).NotTo(o.HaveOccurred())
exutil.By("7. Create tcpport policy in ns")
policyCreateErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns).Execute()
o.Expect(policyCreateErr).NotTo(o.HaveOccurred())
policyCreOutput, policyCreErr := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns).Output()
o.Expect(policyCreErr).NotTo(o.HaveOccurred())
o.Expect(policyCreOutput).To(o.ContainSubstring("tcp-port"))
exutil.By("8. One curl should fail before applying policy")
_, curl2Err := e2eoutput.RunHostCmd(ns, "blue-pod-1", "curl --connect-timeout 5 -s "+net.JoinHostPort(podIPv4, "8888"))
o.Expect(curl2Err).To(o.HaveOccurred())
exutil.By("9. Delete tcp-port policy in ns")
removeResource(oc, true, true, "multi-networkpolicy", "tcp-port", "-n", ns)
exutil.By("10. curl should pass after deleting policy")
_, curl3Err := e2eoutput.RunHostCmd(ns, "blue-pod-1", "curl --connect-timeout 5 -s "+net.JoinHostPort(podIPv4, "8888"))
o.Expect(curl3Err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-55818-Rules are not removed after disabling multinetworkpolicy. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
//https://issues.redhat.com/browse/OCPBUGS-977: Rules are not removed after disabling multinetworkpolicy
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "creat-ten-rules.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41171a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41171b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("3. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("4. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("5. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("6. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("7. Create egress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
policyList := []string{
"egress-allow-same-podselector-with-same-namespaceselector1",
"egress-allow-same-podselector-with-same-namespaceselector2",
"egress-allow-same-podselector-with-same-namespaceselector3",
"egress-allow-same-podselector-with-same-namespaceselector4",
"egress-allow-same-podselector-with-same-namespaceselector5",
"egress-allow-same-podselector-with-same-namespaceselector6",
"egress-allow-same-podselector-with-same-namespaceselector7",
"egress-allow-same-podselector-with-same-namespaceselector8",
"egress-allow-same-podselector-with-same-namespaceselector9",
"egress-allow-same-podselector-with-same-namespaceselector10",
}
for _, policyRule := range policyList {
e2e.Logf("The policy rule is: %s", policyRule)
o.Expect(output).To(o.ContainSubstring(policyRule))
}
exutil.By("8. Same curl testing, one curl pass and three curls will fail after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
exutil.By("9. Disable MultiNetworkpolicy in the cluster")
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
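// Step 10 is the actual regression check for OCPBUGS-977: once
// useMultiNetworkPolicy is disabled, the rules applied in step 7 must no
// longer filter traffic on the secondary network.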
exutil.By("10. All curl should pass again after disabling MacvlanNetworkpolicy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
})
})
package networking

test case
openshift/openshift-tests-private
8468ee7f-cbd1-427e-8cfd-7701887df6a3
Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41168-MultiNetworkPolicy ingress allow same podSelector with same namespaceSelector. [Disruptive]
['"fmt"', '"path/filepath"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
github.com/openshift/openshift-tests-private/test/extended/networking/multinetworkpolicy.go
g.It("Author:weliang-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-41168-MultiNetworkPolicy ingress allow same podSelector with same namespaceSelector. [Disruptive]", func() {
exutil.SkipBaselineCaps(oc, "None")
buildPruningBaseDir := exutil.FixturePath("testdata", "networking/multinetworkpolicy")
policyFile := filepath.Join(buildPruningBaseDir, "ingress-allow-same-podSelector-with-same-namespaceSelector.yaml")
patchSResource := "networks.operator.openshift.io/cluster"
patchInfoTrue := `{"spec":{"useMultiNetworkPolicy":true}}`
patchInfoFalse := `{"spec":{"useMultiNetworkPolicy":false}}`
origContxt, contxtErr := oc.Run("config").Args("current-context").Output()
o.Expect(contxtErr).NotTo(o.HaveOccurred())
defer func() {
useContxtErr := oc.Run("config").Args("use-context", origContxt).Execute()
o.Expect(useContxtErr).NotTo(o.HaveOccurred())
}()
ns1 := "project41168a"
defer oc.AsAdmin().Run("delete").Args("project", ns1, "--ignore-not-found").Execute()
nserr1 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns1).Execute()
o.Expect(nserr1).NotTo(o.HaveOccurred())
_, proerr1 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns1, "user="+ns1).Output()
o.Expect(proerr1).NotTo(o.HaveOccurred())
ns2 := "project41168b"
defer oc.AsAdmin().Run("delete").Args("project", ns2, "--ignore-not-found").Execute()
nserr2 := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(ns2).Execute()
o.Expect(nserr2).NotTo(o.HaveOccurred())
_, proerr2 := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", ns2, "user="+ns2).Output()
o.Expect(proerr2).NotTo(o.HaveOccurred())
exutil.By("1. Prepare multus multinetwork including 2 ns,5 pods and 2 NADs")
defer func() {
patchResourceAsAdmin(oc, patchSResource, patchInfoFalse)
exutil.By("NetworkOperatorStatus should back to normal after disable useMultiNetworkPolicy")
waitForNetworkOperatorState(oc, 20, 10, "True.*True.*False")
waitForNetworkOperatorState(oc, 20, 20, "True.*False.*False")
}()
prepareMultinetworkTest(oc, ns1, ns2, patchInfoTrue)
exutil.By("2. Get IPs of the pod1ns1's secondary interface in first namespace.")
pod1ns1IPv4, pod1ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-1")
exutil.By("3. Get IPs of the pod2ns1's secondary interface in first namespace.")
pod2ns1IPv4, pod2ns1IPv6 := getPodMultiNetwork(oc, ns1, "blue-pod-2")
exutil.By("4. Get IPs of the pod3ns1's secondary interface in first namespace.")
pod3ns1IPv4, pod3ns1IPv6 := getPodMultiNetwork(oc, ns1, "red-pod-1")
exutil.By("5. Get IPs of the pod1ns2's secondary interface in second namespace.")
pod1ns2IPv4, pod1ns2IPv6 := getPodMultiNetwork(oc, ns2, "blue-pod-3")
exutil.By("6. Get IPs of the pod2ns2's secondary interface in second namespace.")
pod2ns2IPv4, pod2ns2IPv6 := getPodMultiNetwork(oc, ns2, "red-pod-2")
exutil.By("7. All curl should pass before applying policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
exutil.By("8. Create Ingress-allow-same-podSelector-with-same-namespaceSelector policy in ns1")
oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", policyFile, "-n", ns1).Execute()
output, err := oc.AsAdmin().Run("get").Args("multi-networkpolicy", "-n", ns1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("ingress-allow-same-podselector-with-same-namespaceselector"))
exutil.By("9. Same curl testing, one curl pass and three curls will fail after applying policy")
curlPod2PodMultiNetworkFail(oc, ns1, "red-pod-1", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "blue-pod-3", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkFail(oc, ns1, "red-pod-2", pod1ns1IPv4, pod1ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-2", pod1ns1IPv4, pod1ns1IPv6)
exutil.By("10. Delete ingress-allow-same-podselector-with-same-namespaceselector policy in ns1")
removeResource(oc, true, true, "multi-networkpolicy", "ingress-allow-same-podselector-with-same-namespaceselector", "-n", ns1)
exutil.By("11. All curl should pass again after deleting policy")
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod3ns1IPv4, pod3ns1IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod1ns2IPv4, pod1ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns2IPv4, pod2ns2IPv6)
curlPod2PodMultiNetworkPass(oc, ns1, "blue-pod-1", pod2ns1IPv4, pod2ns1IPv6)
})