diff --git a/go.mod b/go.mod index b537d43d4..fd3a81513 100644 --- a/go.mod +++ b/go.mod @@ -65,13 +65,16 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/spdystream v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect diff --git a/go.sum b/go.sum index b2f806b7c..6c6bc2257 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,8 @@ github.com/agiledragon/gomonkey/v2 v2.11.0 h1:5oxSgA+tC1xuGsrIorR+sYiziYltmJyEZ9 github.com/agiledragon/gomonkey/v2 v2.11.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -68,6 +70,8 @@ github.com/google/uuid v1.2.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -89,6 +93,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -96,6 +102,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 252ca3bc0..b2ece8a53 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -15,14 +15,19 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - - "github.com/vmware-tanzu/nsx-operator/pkg/logger" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/utils/ptr" "github.com/vmware-tanzu/nsx-operator/pkg/client/clientset/versioned" + "github.com/vmware-tanzu/nsx-operator/pkg/config" + "github.com/vmware-tanzu/nsx-operator/pkg/logger" "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" "github.com/vmware-tanzu/nsx-operator/pkg/nsx/util" "github.com/vmware-tanzu/nsx-operator/test/e2e/providers" @@ -81,6 +86,7 @@ type TestData struct { clientset clientset.Interface crdClientset versioned.Interface nsxClient *NSXClient + vcClient *vcClient } var testData *TestData @@ -101,21 +107,29 @@ func initProvider() error { return nil } -func NewTestData(nsxConfig string) error { +func NewTestData(nsxConfig string, vcUser string, vcPassword string) error { testData = &TestData{} err := testData.createClients() if 
err != nil { return err } - err = testData.createNSXClients(nsxConfig) + config.UpdateConfigFilePath(nsxConfig) + cf, err := config.NewNSXOperatorConfigFromFile() + if err != nil { + return err + } + err = testData.createNSXClients(cf) if err != nil { return err } + if vcUser != "" && vcPassword != "" { + testData.vcClient = newVcClient(cf.VCEndPoint, cf.HttpsPort, vcUser, vcPassword) + } return nil } -func (data *TestData) createNSXClients(nsxConfig string) error { - nsxClient, err := NewNSXClient(nsxConfig) +func (data *TestData) createNSXClients(cf *config.NSXOperatorConfig) error { + nsxClient, err := NewNSXClient(cf) if err != nil { return err } @@ -257,12 +271,15 @@ func collectClusterInfo() error { } // createNamespace creates the provided namespace. -func (data *TestData) createNamespace(namespace string) error { +func (data *TestData) createNamespace(namespace string, mutators ...func(ns *corev1.Namespace)) error { ns := corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, }, } + for _, mutator := range mutators { + mutator(&ns) + } if ns, err := data.clientset.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{}); err != nil { // Ignore error if the namespace already exists if !errors.IsAlreadyExists(err) { @@ -334,8 +351,12 @@ func (data *TestData) deploymentWaitForNames(timeout time.Duration, namespace, d return podNames, nil } -// Temporarily disable traffic check -/* +type PodIPs struct { + ipv4 *net.IP + ipv6 *net.IP + ipStrings []string +} + // podWaitFor polls the K8s apiServer until the specified Pod is found (in the test Namespace) and // the condition predicate is met (or until the provided timeout expires). 
func (data *TestData) podWaitFor(timeout time.Duration, name, namespace string, condition PodCondition) (*corev1.Pod, error) { @@ -364,6 +385,7 @@ func (data *TestData) podWaitForIPs(timeout time.Duration, name, namespace strin return pod.Status.Phase == corev1.PodRunning, nil }) if err != nil { + log.Error(err, "Failed to wait for Pod becoming RUNNING phase", "Pod", name) return nil, err } // According to the K8s API documentation (https://godoc.org/k8s.io/api/core/v1#PodStatus), @@ -381,6 +403,7 @@ func (data *TestData) podWaitForIPs(timeout time.Duration, name, namespace strin } ips, err := parsePodIPs(podIPStrings) if err != nil { + log.Error(err, "Failed to parse Pod's IP", "Pod", name) return nil, err } @@ -399,6 +422,7 @@ func (data *TestData) podWaitForIPs(timeout time.Duration, name, namespace strin return ips, nil } +/* // deploymentWaitForIPsOrNames polls the K8s apiServer until the specified Pod in deployment has an IP address func (data *TestData) deploymentWaitForIPsOrNames(timeout time.Duration, namespace, deployment string) ([]string, []string, error) { podIPStrings := sets.NewString() @@ -431,6 +455,7 @@ func (data *TestData) deploymentWaitForIPsOrNames(timeout time.Duration, namespa } return podIPStrings.List(), podNames, nil } +*/ func parsePodIPs(podIPStrings sets.Set[string]) (*PodIPs, error) { ips := new(PodIPs) @@ -464,7 +489,7 @@ func parsePodIPs(podIPStrings sets.Set[string]) (*PodIPs, error) { // stdout and stderr as strings. An error either indicates that the command couldn't be run or that // the command returned a non-zero error code. 
func (data *TestData) runCommandFromPod(namespace string, podName string, containerName string, cmd []string) (stdout string, stderr string, err error) { - log.Info("Running '%s' in Pod '%s/%s' container '%s'", strings.Join(cmd, " "), namespace, podName, containerName) + log.Info("Running command in Pod's container", "Namespace", namespace, "Pod", podName, "Container", containerName, "Command", cmd) request := data.clientset.CoreV1().RESTClient().Post(). Namespace(namespace). Resource("pods"). @@ -487,14 +512,15 @@ func (data *TestData) runCommandFromPod(namespace string, podName string, contai Stdout: &stdoutB, Stderr: &stderrB, }); err != nil { - log.Info("Error when running command '%s' in Pod '%s/%s' container '%s': %v", strings.Join(cmd, " "), namespace, podName, containerName, err) + log.Error(err, "Failed to run command in Pod's container", "Namespace", namespace, "Pod", podName, "Container", containerName, "Command", cmd) return stdoutB.String(), stderrB.String(), err } outStr, errStr := stdoutB.String(), stderrB.String() - log.Info("Command '%s' in Pod '%s/%s' container '%s' returned with output: '%s' and error: '%s'", strings.Join(cmd, " "), namespace, podName, containerName, outStr, errStr) + log.Info("Successfully run command in Pod's container", "Namespace", namespace, "Pod", podName, "Container", containerName, "Command", cmd, "stdOut", outStr, "stdErr", errStr) return stdoutB.String(), stderrB.String(), nil } +/* func (data *TestData) runPingCommandFromPod(namespace string, podName string, targetPodIPs *PodIPs, count int) error { var cmd []string if targetPodIPs.ipv4 != nil { @@ -690,3 +716,102 @@ func (data *TestData) waitForResourceExistByPath(pathPolicy string, shouldExist }) return err } + +func (data *TestData) createService(namespace, serviceName string, port, targetPort int32, protocol corev1.Protocol, selector map[string]string, + serviceType corev1.ServiceType, mutators ...func(service *corev1.Service)) (*corev1.Service, error) { + 
ipFamilies := []corev1.IPFamily{corev1.IPv4Protocol} + + service := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: namespace, + Labels: map[string]string{ + "nsx-op-e2e": serviceName, + "app": serviceName, + }, + }, + Spec: corev1.ServiceSpec{ + SessionAffinity: corev1.ServiceAffinityNone, + Ports: []corev1.ServicePort{{ + Port: port, + TargetPort: intstr.FromInt32(targetPort), + Protocol: protocol, + }}, + Type: serviceType, + Selector: selector, + IPFamilies: ipFamilies, + }, + } + for _, mutator := range mutators { + mutator(&service) + } + return data.clientset.CoreV1().Services(namespace).Create(context.TODO(), &service, metav1.CreateOptions{}) +} + +func (data *TestData) createPod(namespace, podName, containerName, image string, protocol corev1.Protocol, containerPort int32, + mutators ...func(pod *corev1.Pod)) (*corev1.Pod, error) { + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: namespace, + Annotations: map[string]string{}, + Labels: map[string]string{ + "nsx-op-e2e": podName, + "app": podName, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: containerName, + Image: image, + ImagePullPolicy: corev1.PullIfNotPresent, + Ports: []corev1.ContainerPort{ + { + Protocol: protocol, + ContainerPort: containerPort, + }, + }, + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + HostNetwork: false, + // Set it to 1s for immediate shutdown to reduce test run time and to avoid affecting subsequent tests. 
+ TerminationGracePeriodSeconds: ptr.To[int64](1), + }, + } + for _, mutator := range mutators { + mutator(&pod) + } + return data.clientset.CoreV1().Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) +} + +func (data *TestData) serviceWaitFor(readyTime time.Duration, namespace string, name string, conditionFunc func(svc *corev1.Service) (bool, error)) (*corev1.Service, error) { + err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, readyTime, false, func(ctx context.Context) (bool, error) { + if svc, err := data.clientset.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { + if errors.IsNotFound(err) { + return false, nil + } + return false, fmt.Errorf("error when getting Service '%s/%s': %v", namespace, name, err) + } else { + return conditionFunc(svc) + } + }) + if err != nil { + return nil, err + } + return data.clientset.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +} + +func (data *TestData) deleteService(nsName string, svcName string) error { + ctx := context.TODO() + err := data.clientset.CoreV1().Services(nsName).Delete(ctx, svcName, metav1.DeleteOptions{}) + if err != nil { + log.Error(err, "Failed to delete Service", "namespace", nsName, "name", svcName) + } + return err +} + +func (data *TestData) useWCPSetup() bool { + return data.vcClient != nil +} diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go index 5fe2fcf83..fd900c5cc 100644 --- a/test/e2e/main_test.go +++ b/test/e2e/main_test.go @@ -39,7 +39,7 @@ func testMain(m *testing.M) int { log.Info("Creating clientSets") - if err := NewTestData(testOptions.operatorConfigPath); err != nil { + if err := NewTestData(testOptions.operatorConfigPath, testOptions.vcUser, testOptions.vcPassword); err != nil { log.Error(err, "Error when creating client") return 1 } diff --git a/test/e2e/nsxclient.go b/test/e2e/nsxclient.go index d96592bbe..31d92bfc1 100644 --- a/test/e2e/nsxclient.go +++ 
b/test/e2e/nsxclient.go @@ -14,13 +14,8 @@ type NSXClient struct { *nsx.Client } -func NewNSXClient(configFile string) (*NSXClient, error) { +func NewNSXClient(cf *config.NSXOperatorConfig) (*NSXClient, error) { // nsxClient is used to interact with NSX API. - config.UpdateConfigFilePath(configFile) - cf, err := config.NewNSXOperatorConfigFromFile() - if err != nil { - return nil, err - } client := nsx.GetClient(cf) if client == nil { return nil, fmt.Errorf("failed to get nsx client") diff --git a/test/e2e/precreated_vpc_test.go b/test/e2e/precreated_vpc_test.go new file mode 100644 index 000000000..b45483c55 --- /dev/null +++ b/test/e2e/precreated_vpc_test.go @@ -0,0 +1,420 @@ +package e2e + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "net/http" + "regexp" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/vpc/v1alpha1" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/realizestate" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/vpc" + pkgutil "github.com/vmware-tanzu/nsx-operator/pkg/util" +) + +const ( + preVPCLabelKey = "prevpc-test" + podImage = "netfvt-docker-local.packages.vcfd.broadcom.net/humanux/http_https_echo:latest" + containerName = "netexec-container" + + lbServicePort = int32(8080) + podPort = int32(80) + resourceReadyTime = 5 * time.Minute + nsDeleteTime = 2 * time.Minute + + nsPrivilegedLabel = "pod-security.kubernetes.io/enforce" +) + +var ( + projectPathFormat = "/orgs/%s/projects/%s" + vpcPathFormat = "/orgs/%s/projects/%s/vpcs/%s" + defaultConnectivityProfileFormat = "/orgs/%s/projects/%s/vpc-connectivity-profiles/default" +) + +func TestPreCreatedVPC(t *testing.T) { + orgID, projectID, 
vpcID := setupVPC(t) + nsName := "test-prevpc" + projectPath := fmt.Sprintf(projectPathFormat, orgID, projectID) + profilePath := fmt.Sprintf(defaultConnectivityProfileFormat, orgID, projectID) + preCreatedVPCPath := fmt.Sprintf(vpcPathFormat, orgID, projectID, vpcID) + log.Info("Created VPC on NSX", "path", preCreatedVPCPath) + defer func() { + log.Info("Deleting the created VPC from NSX", "path", preCreatedVPCPath) + ctx := context.Background() + if pollErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, resourceReadyTime, true, func(ctx context.Context) (done bool, err error) { + if err := testData.nsxClient.VPCClient.Delete(orgID, projectID, vpcID, common.Bool(true)); err != nil { + return false, nil + } + log.Info("The pre-created VPC is successfully deleted", "path", preCreatedVPCPath) + return true, nil + }); pollErr != nil { + log.Error(pollErr, "Failed to delete the pre-created VPC within 5m after the test", "path", preCreatedVPCPath) + } + }() + + // Test: create NetworkConfig and NS using the pre-created VPC + useVCAPI := testData.useWCPSetup() + if useVCAPI { + err := testData.vcClient.startSession() + require.NoError(t, err, "A new VC session should be created for test") + defer func() { + testData.vcClient.closeSession() + }() + } + + err := createVPCNamespace(nsName, projectPath, profilePath, preCreatedVPCPath, nil, useVCAPI) + require.NoError(t, err, "VPCNetworkConfiguration and Namespace should be created") + log.Info("Created test Namespace", "Namespace", nsName) + + defer func() { + deleteVPCNamespace(nsName, useVCAPI) + _, err = testData.nsxClient.VPCClient.Get(orgID, projectID, vpcID) + require.NoError(t, err, "Pre-Created VPC should exist after the K8s Namespace is deleted") + }() + // Wait until the created NetworkInfo is ready. 
+ getNetworkInfoWithPrivateIPs(t, nsName, nsName) + log.Info("New Namespace's networkInfo is ready", "Namespace", nsName) + + // Test create LB Service inside the NS + podName := "prevpc-service-pod" + svcName := "prevpc-loadbalancer" + err = createLBService(nsName, svcName, podName) + require.NoError(t, err, "K8s LoadBalancer typed Service should be created") + log.Info("Created LoadBalancer Service in the Namespace", "Namespace", nsName, "Service", svcName) + + // Wait until Pod has allocated IP + _, err = testData.podWaitForIPs(resourceReadyTime, podName, nsName) + require.NoErrorf(t, err, "Pod '%s/%s' is not ready within time %s", nsName, podName, resourceReadyTime.String()) + log.Info("Server Pod for the LoadBalancer Service in the Namespace is ready", "Namespace", nsName, "Service", svcName, "Pod", podName) + + // Wait until LoadBalancer Service has external IP. + svc, err := testData.serviceWaitFor(resourceReadyTime, nsName, svcName, func(svc *corev1.Service) (bool, error) { + lbStatuses := svc.Status.LoadBalancer.Ingress + if len(lbStatuses) == 0 { + return false, nil + } + lbStatus := lbStatuses[0] + if lbStatus.IP == "" { + return false, nil + } + return true, nil + }) + require.NoErrorf(t, err, "K8s LoadBalancer typed Service should get an external IP within time %s", resourceReadyTime) + svcUID := svc.UID + lbIP := svc.Status.LoadBalancer.Ingress[0].IP + log.Info("Load Balancer Service has been assigned with external IP", "Namespace", nsName, "Service", svcName, "ExternalIP", lbIP) + + // Create client Pod inside the NS + clientPodName := "prevpc-client-pod" + _, err = testData.createPod(nsName, clientPodName, containerName, podImage, corev1.ProtocolTCP, podPort) + require.NoErrorf(t, err, "Client Pod '%s/%s' should be created", nsName, clientPodName) + _, err = testData.podWaitForIPs(resourceReadyTime, clientPodName, nsName) + require.NoErrorf(t, err, "Client Pod '%s/%s' is not ready within time %s", nsName, clientPodName, resourceReadyTime.String()) + 
log.Info("Client Pod in the Namespace is ready", "Namespace", nsName, "Service", svcName, "Pod", clientPodName) + + // Test traffic from client Pod to LB Service + url := fmt.Sprintf("http://%s:%d", lbIP, lbServicePort) + cmd := []string{ + `/bin/sh`, "-c", fmt.Sprintf(`curl -s -o /dev/null -w %%{http_code} %s`, url), + } + trafficErr := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) { + stdOut, _, err := testData.runCommandFromPod(nsName, clientPodName, containerName, cmd) + if err != nil { + return false, nil + } + statusCode := strings.Trim(stdOut, `"`) + if statusCode != "200" { + log.Info("Failed to access LoadBalancer", "statusCode", statusCode) + return false, nil + } + return true, nil + }) + require.NoError(t, trafficErr, "LoadBalancer traffic should work") + log.Info("Verified traffic from client Pod to the LoadBalancer Service") + + // Test NSX LB VS should be removed after K8s LB Service is deleted + err = testData.deleteService(nsName, svcName) + require.NoError(t, err, "Service should be deleted") + log.Info("Deleted the LoadBalancer Service") + err = testData.waitForLBVSDeletion(resourceReadyTime, string(svcUID)) + require.NoErrorf(t, err, "NSX resources should be removed after K8s LoadBalancer Service is deleted") + log.Info("NSX resources for the LoadBalancer Service are removed") +} + +func deleteVPCNamespace(nsName string, usingVCAPI bool) { + if usingVCAPI { + if err := testData.vcClient.deleteNamespace(nsName); err != nil { + log.Error(err, "Failed to delete Namespace on VCenter", "namespace", nsName) + } + return + } + + vpcConfigName := fmt.Sprintf("%s-vpcconfig", nsName) + deleteVPCNamespaceOnK8s(nsName, vpcConfigName) +} + +func deleteVPCNamespaceOnK8s(nsName string, vpcConfigName string) { + ctx := context.Background() + if err := testData.deleteNamespace(nsName, nsDeleteTime); err != nil { + log.Error(err, "Failed to delete Namespace", "namespace", nsName)
 + } + if err := testData.crdClientset.CrdV1alpha1().VPCNetworkConfigurations().Delete(ctx, vpcConfigName, metav1.DeleteOptions{}); err != nil { + log.Error(err, "Failed to delete VPCNetworkConfiguration", "name", vpcConfigName) + } +} + +func setupVPC(tb testing.TB) (string, string, string) { + systemVPC, err := testData.waitForSystemNetworkConfigReady(5 * time.Minute) + require.NoError(tb, err) + + nsxProjectPath := systemVPC.Spec.NSXProject + reExp := regexp.MustCompile(`/orgs/([^/]+)/projects/([^/]+)([/\S+]*)`) + matches := reExp.FindStringSubmatch(nsxProjectPath) + orgID, projectID := matches[1], matches[2] + systemVPCStatus := systemVPC.Status.VPCs[0] + useNSXLB := systemVPCStatus.NSXLoadBalancerPath != "" + + vpcID := fmt.Sprintf("testvpc-%s", getRandomString()) + if err := testData.createVPC(orgID, projectID, vpcID, []string{customizedPrivateCIDR1}, useNSXLB); err != nil { + tb.Fatalf("Unable to create a VPC on NSX: %v", err) + } + return orgID, projectID, vpcID +} + +func createVPCNamespace(nsName, projectPath, profilePath, vpcPath string, privateIPs []string, usingVCAPI bool) error { + if usingVCAPI { + return testData.createPreVPCNamespaceByVCenter(nsName, vpcPath) + } + + vpcConfigName := fmt.Sprintf("%s-vpcconfig", nsName) + return createVPCNamespaceOnK8s(nsName, vpcConfigName, projectPath, profilePath, vpcPath, privateIPs) +} + +func createVPCNamespaceOnK8s(nsName, vpcConfigName, projectPath, profilePath, vpcPath string, privateIPs []string) error { + ctx := context.Background() + vpcNetConfig := &v1alpha1.VPCNetworkConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: vpcConfigName, + }, + Spec: v1alpha1.VPCNetworkConfigurationSpec{ + NSXProject: projectPath, + VPCConnectivityProfile: profilePath, + VPC: vpcPath, + PrivateIPs: privateIPs, + }, + } + + if _, err := testData.crdClientset.CrdV1alpha1().VPCNetworkConfigurations().Create(ctx, vpcNetConfig, metav1.CreateOptions{});
err != nil { + log.Error(err, "Failed to create VPCNetworkConfiguration", "name", vpcConfigName) + return err + } + + if err := testData.createNamespace(nsName, func(ns *corev1.Namespace) { + if ns.Annotations == nil { + ns.Annotations = map[string]string{} + } + ns.Annotations[common.AnnotationVPCNetworkConfig] = vpcConfigName + if ns.Labels == nil { + ns.Labels = map[string]string{} + } + ns.Labels[nsPrivilegedLabel] = "privileged" + }); err != nil { + // Clean up the created VPCNetworkConfiguration. + log.Error(err, "Failed to create Namespace", "name", nsName) + if delErr := testData.crdClientset.CrdV1alpha1().VPCNetworkConfigurations().Delete(ctx, vpcConfigName, metav1.DeleteOptions{}); delErr != nil { + log.Error(delErr, "Failed to delete VPCNetworkConfiguration", "name", vpcConfigName) + } + return err + } + return nil +} + +func createLBService(nsName, svcName, podName string) error { + podLabels := map[string]string{ + preVPCLabelKey: svcName, + } + if _, err := testData.createPod(nsName, podName, containerName, podImage, corev1.ProtocolTCP, podPort, func(pod *corev1.Pod) { + for k, v := range podLabels { + pod.Labels[k] = v + } + }); err != nil { + log.Error(err, "Failed to create Pod", "namespace", nsName, "name", podName) + return err + } + if _, err := testData.createService(nsName, svcName, lbServicePort, podPort, corev1.ProtocolTCP, podLabels, corev1.ServiceTypeLoadBalancer); err != nil { + log.Error(err, "Failed to create LoadBalancer Service", "namespace", nsName, "name", svcName) + return err + } + return nil +} + +func getRandomString() string { + timestamp := time.Now().UnixNano() + hash := sha256.Sum256([]byte(fmt.Sprintf("%d", timestamp))) + return hex.EncodeToString(hash[:])[:8] +} + +func (data *TestData) createVPC(orgID, projectID, vpcID string, privateIPs []string, useNSXLB bool) error { + createdVPC := &model.Vpc{ + Id: common.String(vpcID), + DisplayName: common.String("e2e-test-pre-vpc"), + IpAddressType: common.String("IPV4"), + 
PrivateIps: privateIPs, + ResourceType: common.String(common.ResourceTypeVpc), + } + vpcPath := fmt.Sprintf(vpcPathFormat, orgID, projectID, vpcID) + var lbsPath string + var createdLBS *model.LBService + if !useNSXLB { + loadBalancerVPCEndpointEnabled := true + createdVPC.LoadBalancerVpcEndpoint = &model.LoadBalancerVPCEndpoint{Enabled: &loadBalancerVPCEndpointEnabled} + } else { + lbsPath = fmt.Sprintf("%s/vpc-lbs/default", vpcPath) + createdLBS = &model.LBService{ + Id: common.String("default"), + ConnectivityPath: common.String(vpcPath), + Size: common.String(model.LBService_SIZE_SMALL), + ResourceType: common.String(common.ResourceTypeLBService), + } + } + attachmentPath := fmt.Sprintf("%s/attachments/default", vpcPath) + attachment := &model.VpcAttachment{ + Id: common.String("default"), + VpcConnectivityProfile: common.String(fmt.Sprintf(defaultConnectivityProfileFormat, orgID, projectID)), + } + svc := &vpc.VPCService{} + orgRoot, err := svc.WrapHierarchyVPC(orgID, projectID, createdVPC, createdLBS, attachment) + if err != nil { + log.Error(err, "Failed to build HAPI request for VPC related resources") + return err + } + enforceRevisionCheckParam := false + if err := data.nsxClient.OrgRootClient.Patch(*orgRoot, &enforceRevisionCheckParam); err != nil { + return err + } + + log.Info("Successfully requested VPC on NSX", "path", vpcPath) + realizeService := realizestate.InitializeRealizeState(common.Service{NSXClient: data.nsxClient.Client}) + if pollErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 5*time.Minute, true, func(ctx context.Context) (done bool, err error) { + if err = realizeService.CheckRealizeState(pkgutil.NSXTDefaultRetry, vpcPath); err != nil { + log.Error(err, "NSX VPC is not yet realized", "path", vpcPath) + return false, nil + } + if lbsPath != "" { + if err := realizeService.CheckRealizeState(pkgutil.NSXTLBVSDefaultRetry, lbsPath); err != nil { + log.Error(err, "NSX LBS is not yet realized", "path", lbsPath) + 
return false, nil + } + if err = realizeService.CheckRealizeState(pkgutil.NSXTDefaultRetry, attachmentPath); err != nil { + log.Error(err, "VPC attachment is not yet realized", "path", attachmentPath) + return false, nil + } + return true, nil + }); pollErr != nil { + log.Error(pollErr, "Failed to realize VPC and related resources within 5m") + if err := data.nsxClient.VPCClient.Delete(orgID, projectID, vpcID, common.Bool(true)); err != nil { + log.Error(err, "Failed to recursively delete NSX VPC", "path", fmt.Sprintf("/orgs/%s/projects/%s/vpcs/%s", orgID, projectID, vpcID)) + } + return pollErr + } + return nil +} + +func (data *TestData) waitForSystemNetworkConfigReady(timeout time.Duration) (*v1alpha1.VPCNetworkConfiguration, error) { + var systemConfig *v1alpha1.VPCNetworkConfiguration + if pollErr := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, timeout, true, func(ctx context.Context) (done bool, err error) { + systemConfig, err = data.crdClientset.CrdV1alpha1().VPCNetworkConfigurations().Get(ctx, "system", metav1.GetOptions{}) + if err != nil { + return false, nil + } + if len(systemConfig.Status.VPCs) == 0 { + return false, nil + } + systemVPC := systemConfig.Status.VPCs[0] + if systemVPC.VPCPath == "" { + return false, nil + } + return true, nil + }); pollErr != nil { + log.Error(pollErr, "Failed to wait for system VPCNetworkConfiguration to be ready", "timeout", timeout.String()) + return nil, pollErr + } + return systemConfig, nil +} + +func (data *TestData) waitForLBVSDeletion(timeout time.Duration, svcID string) error { + lbServiceTags := []string{"ncp/service_uid", svcID} + if pollErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, timeout, true, func(ctx context.Context) (done bool, err error) { + // Check NSX VirtualServer deletion. 
+ vsResults, err := data.queryResource("LBVirtualServer", lbServiceTags) + if err != nil { + return false, err + } + if len(vsResults.Results) > 0 { + return false, nil + } + // Check NSX LBPool deletion. + lbPoolResults, err := data.queryResource("LBPool", lbServiceTags) + if err != nil { + return false, err + } + if len(lbPoolResults.Results) > 0 { + return false, nil + } + // Check NSX IP Allocation deletion. + ipAllocationResults, err := data.queryResource("VpcIpAddressAllocation", lbServiceTags) + if err != nil { + return false, err + } + if len(ipAllocationResults.Results) > 0 { + return false, nil + } + return true, nil + }); pollErr != nil { + log.Error(pollErr, "Failed to delete LoadBalancer Service related resources on NSX") + return pollErr + } + return nil +} + +func (data *TestData) createPreVPCNamespaceByVCenter(nsName, vpcPath string) error { + svID, err := data.vcClient.getSupervisorID() + if err != nil { + return fmt.Errorf("failed to get a valid supervisor ID: %v", err) + } + err = data.vcClient.createNamespaceWithPreCreatedVPC(nsName, vpcPath, svID) + if err != nil { + return fmt.Errorf("failed to create Namespace on VCenter: %v", err) + } + ctx := context.Background() + getErr := wait.PollUntilContextTimeout(ctx, 2*time.Second, 10*time.Second, false, func(ctx context.Context) (done bool, err error) { + _, statusCode, err := data.vcClient.getNamespaceInfoByName(nsName) + if statusCode == http.StatusNotFound { + return false, nil + } + if err != nil { + return true, err + } + return true, nil + }) + if getErr != nil { + data.vcClient.deleteNamespace(nsName) + return fmt.Errorf("failed to create Namespace on VCenter, delete it: %v", getErr) + } + + return nil +} diff --git a/test/e2e/vclient.go b/test/e2e/vclient.go new file mode 100644 index 000000000..82f15bc0e --- /dev/null +++ b/test/e2e/vclient.go @@ -0,0 +1,261 @@ +package e2e + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "sync" + "time" + + 
"github.com/vmware-tanzu/nsx-operator/pkg/nsx/util" +) + +type vcClient struct { + url *url.URL + httpClient *http.Client + sessionMutex sync.Mutex + sessionKey string +} + +type supervisorInfo struct { + Name string `json:"name"` + ConfigStatus string `json:"config_status"` + K8sStatus string `json:"kubernetes_status"` +} + +type supervisorSummary struct { + ID string `json:"supervisor"` + Info supervisorInfo `json:"info"` +} + +type InstancesIpv4Cidr struct { + Address string `json:"address"` + Prefix int64 `json:"prefix"` +} + +type InstancesVpcConfig struct { + PrivateCidrs []InstancesIpv4Cidr `json:"private_cidrs"` +} + +type InstancesVpcNetworkInfo struct { + VpcConfig InstancesVpcConfig `json:"vpc_config,omitempty"` + Vpc string `json:"vpc,omitempty"` + DefaultSubnetSize int64 `json:"default_subnet_size"` +} + +type InstancesNetworkConfigInfo struct { + NetworkProvider string `json:"network_provider"` + VpcNetwork InstancesVpcNetworkInfo `json:"vpc_network"` +} + +type VCNamespaceCreateSpec struct { + Supervisor string `json:"supervisor"` + Namespace string `json:"namespace"` + NetworkSpec InstancesNetworkConfigInfo `json:"network_spec"` +} + +type VCNamespaceGetInfo struct { + Supervisor string `json:"supervisor"` + NetworkSpec InstancesNetworkConfigInfo `json:"network_spec"` +} + +var ( + sessionURLPath = "/api/session" +) + +func newVcClient(hostname string, port int, userName, password string) *vcClient { + httpClient := createHttpClient() + baseurl := fmt.Sprintf("https://%s:%d/", hostname, port) + vcurl, _ := url.Parse(baseurl) + + vcurl.User = url.UserPassword(userName, password) + return &vcClient{ + url: vcurl, + httpClient: httpClient, + } +} + +func createHttpClient() *http.Client { + tlsConfig := &tls.Config{InsecureSkipVerify: true} // #nosec G402: ignore insecure options + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + return &http.Client{Transport: transport, Timeout: time.Minute} +} + +func (c *vcClient) startSession() 
error { + c.sessionMutex.Lock() + defer c.sessionMutex.Unlock() + if c.sessionKey == "" { + url := fmt.Sprintf("%s://%s%s", c.url.Scheme, c.url.Host, sessionURLPath) + request, err := http.NewRequest(http.MethodPost, url, nil) + if err != nil { + return err + } + username := c.url.User.Username() + password, _ := c.url.User.Password() + request.SetBasicAuth(username, password) + + var sessionData string + if _, err = c.handleRequest(request, &sessionData); err != nil { + return err + } + + c.sessionKey = sessionData + } + return nil +} + +func (c *vcClient) closeSession() error { + c.sessionMutex.Lock() + defer c.sessionMutex.Unlock() + if c.sessionKey == "" { + return nil + } + request, err := c.prepareRequest(http.MethodDelete, sessionURLPath, nil) + if err != nil { + return err + } + + if _, err = c.handleRequest(request, nil); err != nil { + return err + } + + c.sessionKey = "" + return nil +} + +func (c *vcClient) getSupervisorID() (string, error) { + urlPath := "/api/vcenter/namespace-management/supervisors/summaries" + request, err := c.prepareRequest(http.MethodGet, urlPath, nil) + if err != nil { + return "", err + } + var response struct { + Items []supervisorSummary `json:"items"` + } + if _, err = c.handleRequest(request, &response); err != nil { + return "", err + } + + for _, sv := range response.Items { + if sv.Info.ConfigStatus == "RUNNING" { + return sv.ID, nil + } + } + return "", fmt.Errorf("no valid supervisor found on vCenter") +} + +func (c *vcClient) createNamespaceWithPreCreatedVPC(namespace string, vpcPath string, supervisorID string) error { + urlPath := "/api/vcenter/namespaces/instances/v2" + vcNamespace := createVCNamespaceSpec(namespace, supervisorID, vpcPath) + data, err := json.Marshal(vcNamespace) + if err != nil { + return fmt.Errorf("unable convert vcNamespace object to json bytes: %v", err) + } + request, err := c.prepareRequest(http.MethodPost, urlPath, data) + if err != nil { + return fmt.Errorf("failed to parepare http request 
with vcNamespace data: %v", err) + } + if _, err = c.handleRequest(request, nil); err != nil { + return err + } + return nil +} + +func (c *vcClient) getNamespaceInfoByName(namespace string) (*VCNamespaceGetInfo, int, error) { + urlPath := fmt.Sprintf("/api/vcenter/namespaces/instances/v2/%s", namespace) + request, err := c.prepareRequest(http.MethodGet, urlPath, nil) + if err != nil { + return nil, 0, fmt.Errorf("failed to prepare http request with vcNamespace get: %v", err) + } + result := &VCNamespaceGetInfo{} + statusCode, err := c.handleRequest(request, result) + if err != nil { + return nil, statusCode, err + } + return result, statusCode, nil +} + +func (c *vcClient) deleteNamespace(namespace string) error { + urlPath := fmt.Sprintf("/api/vcenter/namespaces/instances/%s", namespace) + request, err := c.prepareRequest(http.MethodDelete, urlPath, nil) + if err != nil { + return fmt.Errorf("failed to parepare http request with vcNamespace deletion: %v", err) + } + if _, err = c.handleRequest(request, nil); err != nil { + return err + } + return nil +} + +func createVCNamespaceSpec(namespace string, svID string, vpcPath string) *VCNamespaceCreateSpec { + return &VCNamespaceCreateSpec{ + Supervisor: svID, + Namespace: namespace, + NetworkSpec: InstancesNetworkConfigInfo{ + NetworkProvider: "NSX_VPC", + VpcNetwork: InstancesVpcNetworkInfo{ + Vpc: vpcPath, + DefaultSubnetSize: 16, + }, + }, + } +} + +func (c *vcClient) prepareRequest(method string, urlPath string, data []byte) (*http.Request, error) { + url := fmt.Sprintf("%s://%s%s", c.url.Scheme, c.url.Host, urlPath) + req, err := http.NewRequest(method, url, bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("vmware-api-session-id", c.sessionKey) + return req, nil +} + +func (c *vcClient) handleRequest(request *http.Request, responseData interface{}) (int, error) { + response, err := c.httpClient.Do(request) + if err != nil { + 
log.Error(err, "Failed to do HTTP request") + return 0, err + } + return handleHTTPResponse(response, responseData) +} + +func handleHTTPResponse(response *http.Response, result interface{}) (int, error) { + statusCode := response.StatusCode + if statusCode == http.StatusNoContent { + return statusCode, nil + } + + if statusCode >= http.StatusOK && statusCode < http.StatusMultipleChoices { + if result == nil { + return statusCode, nil + } + body, err := io.ReadAll(response.Body) + defer response.Body.Close() + + if err != nil { + return statusCode, err + } + if err = json.Unmarshal(body, result); err != nil { + return statusCode, err + } + return statusCode, nil + } + + var err error + if statusCode == http.StatusNotFound { + err = util.HttpNotFoundError + } else if statusCode == http.StatusBadRequest { + err = util.HttpBadRequest + } else { + err = fmt.Errorf("HTTP response with errorcode %d", response.StatusCode) + } + return statusCode, err +}