From 8a4b4c0d270e38048f3d57039bbb5c482ea3aded Mon Sep 17 00:00:00 2001 From: Yiyi Zhou <91219164+zyiyi11@users.noreply.github.com> Date: Tue, 30 Jul 2024 10:55:21 -0700 Subject: [PATCH] Update zone placement under WCP_Workload_Domain_Isolation (#638) --- Makefile | 3 + .../topology.tanzu.vmware.com_zones.yaml | 65 +++++++-- config/rbac/role.yaml | 16 +++ external/tanzu-topology/api/v1alpha1/zone.go | 27 +++- .../api/v1alpha1/zz_generated.deepcopy.go | 28 +++- .../vsphere/placement/zone_placement.go | 61 ++++++-- pkg/topology/availability_zones.go | 79 ++++++++++- pkg/topology/availability_zones_test.go | 134 +++++++++++++++++- test/builder/dummies.go | 37 +++-- .../validation/virtualmachine_validator.go | 14 +- .../virtualmachine_validator_unit_test.go | 124 ++++++++++++---- 11 files changed, 503 insertions(+), 85 deletions(-) diff --git a/Makefile b/Makefile index 6dc0926a9..7a630b692 100644 --- a/Makefile +++ b/Makefile @@ -287,6 +287,9 @@ generate-go: ## Generate golang sources $(CONTROLLER_GEN) \ paths=github.com/vmware-tanzu/vm-operator/external/storage-policy-quota/... \ object:headerFile=./hack/boilerplate/boilerplate.generatego.txt + $(CONTROLLER_GEN) \ + paths=github.com/vmware-tanzu/vm-operator/external/tanzu-topology/... \ + object:headerFile=./hack/boilerplate/boilerplate.generatego.txt $(MAKE) -C ./pkg/util/cloudinit/schema $@ .PHONY: generate-manifests diff --git a/config/crd/external-crds/topology.tanzu.vmware.com_zones.yaml b/config/crd/external-crds/topology.tanzu.vmware.com_zones.yaml index dccaee9ea..fce99bb38 100644 --- a/config/crd/external-crds/topology.tanzu.vmware.com_zones.yaml +++ b/config/crd/external-crds/topology.tanzu.vmware.com_zones.yaml @@ -67,19 +67,62 @@ spec: - apiVersion - name type: object - folderMoID: + managedVMs: description: |- - FolderMoID is the managed object ID of the vSphere Folder for a - Namespace. - type: string - poolMoIDs: + ManagedVMs contains ResourcePool and folder moIDs to represent managedVMs + entity within the namespace + properties: + folderMoID: + description: |- + FolderMoID is the managed object ID of the vSphere Folder for a + Namespace. + type: string + poolMoIDs: + description: |- + PoolMoIDs are the managed object ID of the vSphere ResourcePools + in an individual vSphere Zone. A zone may be comprised of + multiple ResourcePools. + items: + type: string + type: array + type: object + namespace: + description: Namespace contains ResourcePool and folder moIDs to represent + the namespace + properties: + folderMoID: + description: |- + FolderMoID is the managed object ID of the vSphere Folder for a + Namespace. + type: string + poolMoIDs: + description: |- + PoolMoIDs are the managed object ID of the vSphere ResourcePools + in an individual vSphere Zone. A zone may be comprised of + multiple ResourcePools. + items: + type: string + type: array + type: object + vSpherePods: description: |- - PoolMoIDs are the managed object ID of the vSphere ResourcePools for a - Namespace in an individual vSphere Zone. A zone may be comprised of - multiple ResourcePools. - items: - type: string - type: array + VSpherePods contains ResourcePool and folder moIDs to represent vSpherePods + entity within the namespace + properties: + folderMoID: + description: |- + FolderMoID is the managed object ID of the vSphere Folder for a + Namespace. + type: string + poolMoIDs: + description: |- + PoolMoIDs are the managed object ID of the vSphere ResourcePools + in an individual vSphere Zone. A zone may be comprised of + multiple ResourcePools. 
+ items: + type: string + type: array + type: object required: - availabilityZoneReference type: object diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 546ccb858..a524df85d 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -252,6 +252,22 @@ rules: - get - list - watch +- apiGroups: + - topology.tanzu.vmware.com + resources: + - zones + verbs: + - get + - list + - watch +- apiGroups: + - topology.tanzu.vmware.com + resources: + - zones/status + verbs: + - get + - list + - watch - apiGroups: - vmoperator.vmware.com resources: diff --git a/external/tanzu-topology/api/v1alpha1/zone.go b/external/tanzu-topology/api/v1alpha1/zone.go index abd3503d3..93b2718b3 100644 --- a/external/tanzu-topology/api/v1alpha1/zone.go +++ b/external/tanzu-topology/api/v1alpha1/zone.go @@ -18,13 +18,12 @@ type AvailabilityZoneReference struct { Name string `json:"name"` } -// ZoneSpec contains identifying information about the -// vSphere resources used to represent a Kubernetes namespace on individual -// vSphere Zones. -type ZoneSpec struct { +// VSphereEntityInfo contains the managed object IDs associated with +// a vSphere entity +type VSphereEntityInfo struct { // +optional - // PoolMoIDs are the managed object ID of the vSphere ResourcePools for a - // Namespace in an individual vSphere Zone. A zone may be comprised of + // PoolMoIDs are the managed object ID of the vSphere ResourcePools + // in an individual vSphere Zone. A zone may be comprised of // multiple ResourcePools. PoolMoIDs []string `json:"poolMoIDs,omitempty"` @@ -32,6 +31,22 @@ type ZoneSpec struct { // FolderMoID is the managed object ID of the vSphere Folder for a // Namespace. FolderMoID string `json:"folderMoID,omitempty"` +} + +// ZoneSpec contains identifying information about the +// vSphere resources used to represent a Kubernetes namespace on individual +// vSphere Zones. +type ZoneSpec struct { + // Namespace contains ResourcePool and folder moIDs to represent the namespace + Namespace VSphereEntityInfo `json:"namespace,omitempty"` + + // VSpherePods contains ResourcePool and folder moIDs to represent vSpherePods + // entity within the namespace + VSpherePods VSphereEntityInfo `json:"vSpherePods,omitempty"` + + // ManagedVMs contains ResourcePool and folder moIDs to represent managedVMs + // entity within the namespace + ManagedVMs VSphereEntityInfo `json:"managedVMs,omitempty"` // Zone is a reference to the cluster scoped AvailabilityZone this // Zone is derived from. diff --git a/external/tanzu-topology/api/v1alpha1/zz_generated.deepcopy.go b/external/tanzu-topology/api/v1alpha1/zz_generated.deepcopy.go index 841857cd6..654825197 100644 --- a/external/tanzu-topology/api/v1alpha1/zz_generated.deepcopy.go +++ b/external/tanzu-topology/api/v1alpha1/zz_generated.deepcopy.go @@ -148,6 +148,26 @@ func (in *NamespaceInfo) DeepCopy() *NamespaceInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereEntityInfo) DeepCopyInto(out *VSphereEntityInfo) { + *out = *in + if in.PoolMoIDs != nil { + in, out := &in.PoolMoIDs, &out.PoolMoIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereEntityInfo. 
+func (in *VSphereEntityInfo) DeepCopy() *VSphereEntityInfo { + if in == nil { + return nil + } + out := new(VSphereEntityInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VSphereZone) DeepCopyInto(out *VSphereZone) { *out = *in @@ -299,11 +319,9 @@ func (in *ZoneList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZoneSpec) DeepCopyInto(out *ZoneSpec) { *out = *in - if in.PoolMoIDs != nil { - in, out := &in.PoolMoIDs, &out.PoolMoIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } + in.Namespace.DeepCopyInto(&out.Namespace) + in.VSpherePods.DeepCopyInto(&out.VSpherePods) + in.ManagedVMs.DeepCopyInto(&out.ManagedVMs) out.Zone = in.Zone } diff --git a/pkg/providers/vsphere/placement/zone_placement.go b/pkg/providers/vsphere/placement/zone_placement.go index baef06960..a010503cf 100644 --- a/pkg/providers/vsphere/placement/zone_placement.go +++ b/pkg/providers/vsphere/placement/zone_placement.go @@ -93,30 +93,63 @@ func getPlacementCandidates( zonePlacement bool, childRPName string) (map[string][]string, error) { - var zones []topologyv1.AvailabilityZone + candidates := map[string][]string{} + + // When FSS_WCP_WORKLOAD_DOMAIN_ISOLATION is enabled, use namespaced Zone CR to get candidate resource pools. + if pkgcfg.FromContext(vmCtx).Features.WorkloadDomainIsolation { + var zones []topologyv1.Zone + if zonePlacement { + z, err := topology.GetZones(vmCtx, client, vmCtx.VM.Namespace) + if err != nil { + return nil, err + } + zones = z + } else { + zone, err := topology.GetZone(vmCtx, client, vmCtx.VM.Labels[topology.KubernetesTopologyZoneLabelKey], vmCtx.VM.Namespace) + if err != nil { + return nil, err + } + zones = append(zones, zone) + } + for _, zone := range zones { + rpMoIDs := zone.Spec.ManagedVMs.PoolMoIDs + if childRPName != "" { + childRPMoIDs := lookupChildRPs(vmCtx, vcClient, rpMoIDs, zone.Name, childRPName) + if len(childRPMoIDs) == 0 { + vmCtx.Logger.Info("Zone had no candidates after looking up children ResourcePools", + "zone", zone.Name, "rpMoIDs", rpMoIDs, "childRPName", childRPName) + continue + } + rpMoIDs = childRPMoIDs + } + candidates[zone.Name] = rpMoIDs + } + return candidates, nil + } + + // When FSS_WCP_WORKLOAD_DOMAIN_ISOLATION is disabled, use cluster scoped AvailabilityZone CR to get candidate resource pools. + var azs []topologyv1.AvailabilityZone if zonePlacement { - z, err := topology.GetAvailabilityZones(vmCtx, client) + az, err := topology.GetAvailabilityZones(vmCtx, client) if err != nil { return nil, err } - zones = z + azs = az } else { // Consider candidates only within the already assigned zone. - // NOTE: GetAvailabilityZone() will return a "default" AZ when the FSS is not enabled. - zone, err := topology.GetAvailabilityZone(vmCtx, client, vmCtx.VM.Labels[topology.KubernetesTopologyZoneLabelKey]) + // NOTE: GetAvailabilityZone() will return a "default" AZ when WCP_FaultDomains FSS is not enabled. 
+ az, err := topology.GetAvailabilityZone(vmCtx, client, vmCtx.VM.Labels[topology.KubernetesTopologyZoneLabelKey]) if err != nil { return nil, err } - zones = append(zones, zone) + azs = append(azs, az) } - candidates := map[string][]string{} - - for _, zone := range zones { - nsInfo, ok := zone.Spec.Namespaces[vmCtx.VM.Namespace] + for _, az := range azs { + nsInfo, ok := az.Spec.Namespaces[vmCtx.VM.Namespace] if !ok { continue } @@ -129,16 +162,16 @@ func getPlacementCandidates( } if childRPName != "" { - childRPMoIDs := lookupChildRPs(vmCtx, vcClient, rpMoIDs, zone.Name, childRPName) + childRPMoIDs := lookupChildRPs(vmCtx, vcClient, rpMoIDs, az.Name, childRPName) if len(childRPMoIDs) == 0 { - vmCtx.Logger.Info("Zone had no candidates after looking up children ResourcePools", - "zone", zone.Name, "rpMoIDs", rpMoIDs, "childRPName", childRPName) + vmCtx.Logger.Info("AvailabilityZone had no candidates after looking up children ResourcePools", + "az", az.Name, "rpMoIDs", rpMoIDs, "childRPName", childRPName) continue } rpMoIDs = childRPMoIDs } - candidates[zone.Name] = rpMoIDs + candidates[az.Name] = rpMoIDs } return candidates, nil diff --git a/pkg/topology/availability_zones.go b/pkg/topology/availability_zones.go index 49c0c25d6..e2583f04f 100644 --- a/pkg/topology/availability_zones.go +++ b/pkg/topology/availability_zones.go @@ -11,6 +11,7 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" topologyv1 "github.com/vmware-tanzu/vm-operator/external/tanzu-topology/api/v1alpha1" + pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" ) const ( @@ -22,10 +23,14 @@ const ( var ( // ErrNoAvailabilityZones occurs when no availability zones are detected. ErrNoAvailabilityZones = errors.New("no availability zones") + + ErrNoZones = errors.New("no zones in specified namespace") ) // +kubebuilder:rbac:groups=topology.tanzu.vmware.com,resources=availabilityzones,verbs=get;list;watch // +kubebuilder:rbac:groups=topology.tanzu.vmware.com,resources=availabilityzones/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=topology.tanzu.vmware.com,resources=zones,verbs=get;list;watch +// +kubebuilder:rbac:groups=topology.tanzu.vmware.com,resources=zones/status,verbs=get;list;watch // LookupZoneForClusterMoID returns the zone for the given Cluster MoID. func LookupZoneForClusterMoID( @@ -59,6 +64,17 @@ func GetNamespaceFolderAndRPMoID( client ctrlclient.Client, availabilityZoneName, namespace string) (string, string, error) { + if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation { + zone, err := GetZone(ctx, client, availabilityZoneName, namespace) + if err != nil { + return "", "", err + } + if len(zone.Spec.ManagedVMs.PoolMoIDs) != 0 { + return zone.Spec.ManagedVMs.FolderMoID, zone.Spec.ManagedVMs.PoolMoIDs[0], nil + } + return zone.Spec.ManagedVMs.FolderMoID, "", nil + } + availabilityZone, err := GetAvailabilityZone(ctx, client, availabilityZoneName) if err != nil { return "", "", err @@ -84,14 +100,27 @@ func GetNamespaceFolderAndRPMoIDs( client ctrlclient.Client, namespace string) (string, []string, error) { + var folderMoID string + var rpMoIDs []string + + if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation { + zones, err := GetZones(ctx, client, namespace) + // If no Zones found in namespace, do not return err. + if err != nil && !errors.Is(err, ErrNoZones) { + return "", nil, err + } + for _, zone := range zones { + folderMoID = zone.Spec.ManagedVMs.FolderMoID + rpMoIDs = append(rpMoIDs, zone.Spec.ManagedVMs.PoolMoIDs...) 
+ } + return folderMoID, rpMoIDs, nil + } + availabilityZones, err := GetAvailabilityZones(ctx, client) if err != nil { return "", nil, err } - var folderMoID string - var rpMoIDs []string - for _, az := range availabilityZones { if nsInfo, ok := az.Spec.Namespaces[namespace]; ok { folderMoID = nsInfo.FolderMoId @@ -112,6 +141,20 @@ func GetNamespaceFolderMoID( client ctrlclient.Client, namespace string) (string, error) { + if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation { + zones, err := GetZones(ctx, client, namespace) + // If no Zones found in namespace, do not return err here. + if err != nil && !errors.Is(err, ErrNoZones) { + return "", err + } + // Note that the Folder is VC-scoped, but we store the Folder MoID in each Zone CR + // so we can return the first match. + for _, zone := range zones { + return zone.Spec.ManagedVMs.FolderMoID, nil + } + return "", fmt.Errorf("unable to get FolderMoID for namespace %s", namespace) + } + availabilityZones, err := GetAvailabilityZones(ctx, client) if err != nil { return "", err @@ -156,3 +199,33 @@ func GetAvailabilityZone( err := client.Get(ctx, ctrlclient.ObjectKey{Name: availabilityZoneName}, &availabilityZone) return availabilityZone, err } + +// GetZones returns a list of the Zone resources in a namespace. +func GetZones( + ctx context.Context, + client ctrlclient.Client, + namespace string) ([]topologyv1.Zone, error) { + + zoneList := &topologyv1.ZoneList{} + if err := client.List(ctx, zoneList, ctrlclient.InNamespace(namespace)); err != nil { + return nil, err + } + + if len(zoneList.Items) == 0 { + return nil, ErrNoZones + } + + return zoneList.Items, nil +} + +// GetZone returns a namespaced Zone resource. +func GetZone( + ctx context.Context, + client ctrlclient.Client, + zoneName string, + namespace string) (topologyv1.Zone, error) { + + var zone topologyv1.Zone + err := client.Get(ctx, ctrlclient.ObjectKey{Name: zoneName, Namespace: namespace}, &zone) + return zone, err +} diff --git a/pkg/topology/availability_zones_test.go b/pkg/topology/availability_zones_test.go index b88e7439d..bcabd0a1d 100644 --- a/pkg/topology/availability_zones_test.go +++ b/pkg/topology/availability_zones_test.go @@ -25,11 +25,12 @@ const ( folderMoID = "folder-moid" ) -var _ = Describe("Availability Zones", func() { +var _ = Describe("Availability Zones and Zones", func() { var ( ctx context.Context client ctrlclient.Client numberOfAvailabilityZones int + numberOfZonesPerNamespace int numberOfNamespaces int specCCRID bool ) @@ -44,6 +45,7 @@ var _ = Describe("Availability Zones", func() { ctx = nil client = nil numberOfAvailabilityZones = 0 + numberOfZonesPerNamespace = 0 numberOfNamespaces = 0 }) @@ -70,6 +72,23 @@ var _ = Describe("Availability Zones", func() { } Expect(client.Create(ctx, obj)).To(Succeed()) } + for i := 0; i < numberOfNamespaces; i++ { + for j := 0; j < numberOfZonesPerNamespace; j++ { + obj := &topologyv1.Zone{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: fmt.Sprintf("ns-%d", i), + Name: fmt.Sprintf("zone-%d", j), + }, + Spec: topologyv1.ZoneSpec{ + ManagedVMs: topologyv1.VSphereEntityInfo{ + PoolMoIDs: []string{poolMoID}, + FolderMoID: folderMoID, + }, + }, + } + Expect(client.Create(ctx, obj)).To(Succeed()) + } + } }) assertGetAvailabilityZonesSuccess := func() { @@ -78,6 +97,14 @@ var _ = Describe("Availability Zones", func() { ExpectWithOffset(1, zones).To(HaveLen(numberOfAvailabilityZones)) } + assertGetZonesSuccess := func() { + for i := 0; i < numberOfNamespaces; i++ { + zones, err := topology.GetZones(ctx, 
client, fmt.Sprintf("ns-%d", i)) + ExpectWithOffset(1, err).ToNot(HaveOccurred()) + ExpectWithOffset(1, zones).To(HaveLen(numberOfZonesPerNamespace)) + } + } + assertGetAvailabilityZoneSuccess := func() { for i := 0; i < numberOfAvailabilityZones; i++ { _, err := topology.GetAvailabilityZone(ctx, client, fmt.Sprintf("az-%d", i)) @@ -85,11 +112,27 @@ var _ = Describe("Availability Zones", func() { } } + assertGetZoneSuccess := func() { + for i := 0; i < numberOfNamespaces; i++ { + for j := 0; j < numberOfZonesPerNamespace; j++ { + _, err := topology.GetZone(ctx, client, fmt.Sprintf("zone-%d", j), fmt.Sprintf("ns-%d", i)) + ExpectWithOffset(1, err).ToNot(HaveOccurred()) + } + } + } + assertGetAvailabilityZonesErrNoAvailabilityZones := func() { _, err := topology.GetAvailabilityZones(ctx, client) ExpectWithOffset(1, err).To(MatchError(topology.ErrNoAvailabilityZones)) } + assertGetZonesErrNoZones := func() { + for i := 0; i < numberOfNamespaces; i++ { + _, err := topology.GetZones(ctx, client, fmt.Sprintf("ns-%d", i)) + ExpectWithOffset(1, err).To(MatchError(topology.ErrNoZones)) + } + } + assertGetAvailabilityZoneValidNamesErrNotFound := func() { for i := 0; i < numberOfAvailabilityZones; i++ { _, err := topology.GetAvailabilityZone(ctx, client, fmt.Sprintf("az-%d", i)) @@ -102,11 +145,25 @@ var _ = Describe("Availability Zones", func() { ExpectWithOffset(1, apierrors.IsNotFound(err)).To(BeTrue()) } + assertGetZoneInvalidNameErrNotFound := func() { + for i := 0; i < numberOfNamespaces; i++ { + _, err := topology.GetZone(ctx, client, "invalid", fmt.Sprintf("ns-%d", i)) + ExpectWithOffset(1, apierrors.IsNotFound(err)).To(BeTrue()) + } + } + assertGetAvailabilityZoneEmptyNameErrNotFound := func() { _, err := topology.GetAvailabilityZone(ctx, client, "") ExpectWithOffset(1, apierrors.IsNotFound(err)).To(BeTrue()) } + assertGetZoneEmptyNameErrNotFound := func() { + for i := 0; i < numberOfNamespaces; i++ { + _, err := topology.GetZone(ctx, client, "", fmt.Sprintf("ns-%d", i)) + ExpectWithOffset(1, apierrors.IsNotFound(err)).To(BeTrue()) + } + } + assertGetNamespaceFolderAndRPMoIDInvalidNameErrNotFound := func() { _, _, err := topology.GetNamespaceFolderAndRPMoID(ctx, client, "invalid", "ns-1") ExpectWithOffset(1, apierrors.IsNotFound(err)).To(BeTrue()) @@ -125,6 +182,9 @@ var _ = Describe("Availability Zones", func() { for i := 0; i < numberOfAvailabilityZones; i++ { assertGetNamespaceFolderAndRPMoIDSuccessForAZ(fmt.Sprintf("az-%d", i)) } + for i := 0; i < numberOfZonesPerNamespace; i++ { + assertGetNamespaceFolderAndRPMoIDSuccessForAZ(fmt.Sprintf("zone-%d", i)) + } } assertGetNamespaceFolderAndRPMoIDInvalidAZErrNotFound := func() { @@ -241,7 +301,63 @@ var _ = Describe("Availability Zones", func() { }) }) - When("Availability zones do not exist", func() { + When("Two Zone resources exist per namespace", func() { + BeforeEach(func() { + numberOfZonesPerNamespace = 2 + pkgcfg.UpdateContext(ctx, func(config *pkgcfg.Config) { + config.Features.WorkloadDomainIsolation = true + }) + }) + When("Three DevOps Namespace resources exist", func() { + BeforeEach(func() { + numberOfNamespaces = 3 + }) + Context("GetZones", func() { + It("Should return the two Zone resources per namespace", assertGetZonesSuccess) + }) + Context("GetZone", func() { + Context("With a valid Zone name", func() { + It("Should return the AvailabilityZone resource", assertGetZoneSuccess) + }) + Context("With an invalid Zone name", func() { + It("Should return an apierrors.NotFound error", assertGetZoneInvalidNameErrNotFound) + }) + 
Context("With an empty Zone name", func() { + It("Should return an apierrors.NotFound error", assertGetZoneEmptyNameErrNotFound) + }) + }) + Context("GetNamespaceFolderAndRPMoID", func() { + Context("With an invalid Zone name", assertGetNamespaceFolderAndRPMoIDInvalidAZErrNotFound) + Context("With a valid Zone name", func() { + It("Should return the RP and Folder resources", assertGetNamespaceFolderAndRPMoIDSuccess) + }) + Context("With an invalid Zone name", func() { + It("Should return an apierrors.NotFound error", assertGetNamespaceFolderAndRPMoIDInvalidNameErrNotFound) + }) + Context("With an invalid Namespace name", func() { + It("Should return an error", assertGetNamespaceFolderAndRPMoIDInvalidNamespaceErrNotFound) + }) + }) + Context("GetNamespaceFolderMoID", func() { + Context("With an invalid Namespace name", func() { + It("Should return NotFound", assertGetNamespaceFolderMoIDInvalidNamespaceErrNotFound) + }) + Context("With a valid Namespace name", func() { + It("Should return the RP and Folder resources", assertGetNamespaceFolderMoIDSuccess) + }) + }) + Context("GetNamespaceFolderAndRPMoIDs", func() { + Context("With an invalid Namespace name", func() { + It("Should return NotFound", assertGetNamespaceFolderAndRPMoIDsInvalidNamespaceNoErr) + }) + Context("With a valid Namespace name", func() { + It("Should return the RP and Folder resources", assertGetNamespaceFolderAndRPMoIDsSuccess) + }) + }) + }) + }) + + When("Availability zones and zones do not exist", func() { When("DevOps Namespaces exist", func() { BeforeEach(func() { numberOfNamespaces = 3 @@ -260,7 +376,19 @@ var _ = Describe("Availability Zones", func() { }) }) }) + Context("GetZones", func() { + It("Should return an ErrNoZones", assertGetZonesErrNoZones) + }) + Context("GetZone", func() { + Context("With an invalid Zone name", func() { + It("Should return an apierrors.NotFound error", assertGetZoneInvalidNameErrNotFound) + }) + Context("With an empty Zone name", func() { + It("Should return an apierrors.NotFound error", assertGetZoneEmptyNameErrNotFound) + }) + }) }) + When("DevOps Namespaces do not exist", func() { Context("GetAvailabilityZones", func() { It("Should return an ErrNoAvailabilityZones", assertGetAvailabilityZonesErrNoAvailabilityZones) @@ -311,7 +439,7 @@ var _ = Describe("Availability Zones", func() { }) }) - When("there are no zones", func() { + When("there are no availability zones", func() { BeforeEach(func() { numberOfAvailabilityZones = 0 }) diff --git a/test/builder/dummies.go b/test/builder/dummies.go index dcf82b96c..f07299b98 100644 --- a/test/builder/dummies.go +++ b/test/builder/dummies.go @@ -29,16 +29,16 @@ import ( ) const ( - DummyVMIID = "vmi-0123456789" - DummyImageName = "dummy-image-name" - DummyClassName = "dummyClassName" - DummyVolumeName = "dummy-volume-name" - DummyPVCName = "dummyPVCName" - DummyDistroVersion = "dummyDistroVersion" - DummyOSType = "centosGuest" - DummyStorageClassName = "dummy-storage-class" - DummyResourceQuotaName = "dummy-resource-quota" - DummyAvailabilityZoneName = "dummy-availability-zone" + DummyVMIID = "vmi-0123456789" + DummyImageName = "dummy-image-name" + DummyClassName = "dummyClassName" + DummyVolumeName = "dummy-volume-name" + DummyPVCName = "dummyPVCName" + DummyDistroVersion = "dummyDistroVersion" + DummyOSType = "centosGuest" + DummyStorageClassName = "dummy-storage-class" + DummyResourceQuotaName = "dummy-resource-quota" + DummyZoneName = "dummy-zone" ) const ( @@ -73,7 +73,7 @@ func DummyResourceQuota(namespace, rlName string) 
*corev1.ResourceQuota { } func DummyAvailabilityZone() *topologyv1.AvailabilityZone { - return DummyNamedAvailabilityZone(DummyAvailabilityZoneName) + return DummyNamedAvailabilityZone(DummyZoneName) } func DummyNamedAvailabilityZone(name string) *topologyv1.AvailabilityZone { @@ -88,6 +88,21 @@ func DummyNamedAvailabilityZone(name string) *topologyv1.AvailabilityZone { } } +// DummyZone uses the same name with AZ. +func DummyZone(namespace string) *topologyv1.Zone { + return DummyNamedZone(DummyZoneName, namespace) +} + +func DummyNamedZone(name, namespace string) *topologyv1.Zone { + return &topologyv1.Zone{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: topologyv1.ZoneSpec{}, + } +} + func DummyPersistentVolumeClaim() *corev1.PersistentVolumeClaim { var storageClass = "dummy-storage-class" return &corev1.PersistentVolumeClaim{ diff --git a/webhooks/virtualmachine/validation/virtualmachine_validator.go b/webhooks/virtualmachine/validation/virtualmachine_validator.go index b9c102cbe..1e6aca93c 100644 --- a/webhooks/virtualmachine/validation/virtualmachine_validator.go +++ b/webhooks/virtualmachine/validation/virtualmachine_validator.go @@ -1127,9 +1127,17 @@ func (v validator) validateAvailabilityZone(ctx *pkgctx.WebhookRequestContext, v } // Validate the name of the provided availability zone. - if zone := vm.Labels[topology.KubernetesTopologyZoneLabelKey]; zone != "" { - if _, err := topology.GetAvailabilityZone(ctx.Context, v.client, zone); err != nil { - return append(allErrs, field.Invalid(zoneLabelPath, zone, err.Error())) + zoneLabelVal := vm.Labels[topology.KubernetesTopologyZoneLabelKey] + if zoneLabelVal != "" { + if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation { + // Validate the name of the provided zone. + // It is the same name as az. 
+ if _, err := topology.GetZone(ctx.Context, v.client, zoneLabelVal, vm.Namespace); err != nil { + return append(allErrs, field.Invalid(zoneLabelPath, zoneLabelVal, err.Error())) + } + } + if _, err := topology.GetAvailabilityZone(ctx.Context, v.client, zoneLabelVal); err != nil { + return append(allErrs, field.Invalid(zoneLabelPath, zoneLabelVal, err.Error())) } } diff --git a/webhooks/virtualmachine/validation/virtualmachine_validator_unit_test.go b/webhooks/virtualmachine/validation/virtualmachine_validator_unit_test.go index b2918afab..6d20f4e82 100644 --- a/webhooks/virtualmachine/validation/virtualmachine_validator_unit_test.go +++ b/webhooks/virtualmachine/validation/virtualmachine_validator_unit_test.go @@ -43,6 +43,7 @@ const ( dummyCreatedAtSchemaVersionVal = "dummy-created-at-schema-version" dummyPausedVMLabelVal = "dummy-devops" dummyVmiName = "vmi-dummy" + dummyNamespaceName = "dummy-vm-namespace-for-webhook-validation" vmiKind = "VirtualMachineImage" cvmiKind = "Cluster" + vmiKind invalidImageKind = "supported: " + vmiKind + "; " + cvmiKind @@ -106,7 +107,7 @@ type unitValidatingWebhookContext struct { func newUnitTestContextForValidatingWebhook(isUpdate bool) *unitValidatingWebhookContext { vm := builder.DummyVirtualMachine() vm.Name = "dummy-vm" - vm.Namespace = "dummy-vm-namespace-for-webhook-validation" + vm.Namespace = dummyNamespaceName obj, err := builder.ToUnstructured(vm) Expect(err).ToNot(HaveOccurred()) @@ -119,8 +120,9 @@ func newUnitTestContextForValidatingWebhook(isUpdate bool) *unitValidatingWebhoo Expect(err).ToNot(HaveOccurred()) } - zone := builder.DummyAvailabilityZone() - initObjects := []client.Object{zone} + az := builder.DummyAvailabilityZone() + zone := builder.DummyZone(dummyNamespaceName) + initObjects := []client.Object{az, zone} return &unitValidatingWebhookContext{ UnitTestContextForValidatingWebhook: *suite.NewUnitTestContextForValidatingWebhook(obj, oldObj, initObjects...), @@ -143,9 +145,6 @@ func unitTestsValidateCreate() { invalidPVCName bool invalidPVCReadOnly bool withInstanceStorageVolumes bool - isNoAvailabilityZones bool - isInvalidAvailabilityZone bool - isEmptyAvailabilityZone bool powerState vmopv1.VirtualMachinePowerState nextRestartTime string instanceUUID string @@ -179,19 +178,6 @@ func unitTestsValidateCreate() { ctx.vm.Spec.Volumes = append(ctx.vm.Spec.Volumes, instanceStorageVolumes...) } - if args.isNoAvailabilityZones { - Expect(ctx.Client.Delete(ctx, builder.DummyAvailabilityZone())).To(Succeed()) - } - //nolint:gocritic // Ignore linter complaint about converting to switch case since the following is more readable. 
- if args.isEmptyAvailabilityZone { - delete(ctx.vm.Labels, topology.KubernetesTopologyZoneLabelKey) - } else if args.isInvalidAvailabilityZone { - ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = "invalid" - } else { - zoneName := builder.DummyAvailabilityZoneName - ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = zoneName - } - ctx.vm.Spec.PowerState = args.powerState ctx.vm.Spec.NextRestartTime = args.nextRestartTime ctx.vm.Spec.InstanceUUID = args.instanceUUID @@ -216,6 +202,9 @@ func unitTestsValidateCreate() { pkgcfg.SetContext(ctx, func(config *pkgcfg.Config) { config.Features.IsoSupport = true }) + pkgcfg.SetContext(ctx, func(config *pkgcfg.Config) { + config.Features.WorkloadDomainIsolation = true + }) }) AfterEach(func() { @@ -244,13 +233,6 @@ func unitTestsValidateCreate() { field.Forbidden(volPath, "adding or modifying instance storage volume claim(s) is not allowed").Error(), nil), Entry("should allow when there are instance storage volumes and user is service user", createArgs{isServiceUser: true, withInstanceStorageVolumes: true}, true, nil, nil), - Entry("should allow when VM specifies no availability zone, there are availability zones", createArgs{isEmptyAvailabilityZone: true}, true, nil, nil), - Entry("should allow when VM specifies no availability zone, there are no availability zones", createArgs{isEmptyAvailabilityZone: true, isNoAvailabilityZones: true}, true, nil, nil), - Entry("should allow when VM specifies valid availability zone, there are availability zones", createArgs{}, true, nil, nil), - Entry("should deny when VM specifies invalid availability zone, there are availability zones", createArgs{isInvalidAvailabilityZone: true}, false, nil, nil), - Entry("should deny when VM specifies invalid availability zone, there are no availability zones", createArgs{isInvalidAvailabilityZone: true, isNoAvailabilityZones: true}, false, nil, nil), - Entry("should deny when there are no availability zones and WCP FaultDomains FSS is enabled", createArgs{isNoAvailabilityZones: true}, false, nil, nil), - Entry("should disallow creating VM with suspended power state", createArgs{powerState: vmopv1.VirtualMachinePowerStateSuspended}, false, field.Invalid(specPath.Child("powerState"), vmopv1.VirtualMachinePowerStateSuspended, "cannot set a new VM's power state to Suspended").Error(), nil), @@ -283,6 +265,90 @@ func unitTestsValidateCreate() { } } + Context("availability zone and zone", func() { + DescribeTable("create", doTest, + Entry("should allow when VM specifies no availability zone, there are availability zones and zones", + testParams{ + setup: func(ctx *unitValidatingWebhookContext) { + delete(ctx.vm.Labels, topology.KubernetesTopologyZoneLabelKey) + }, + expectAllowed: true, + }, + ), + Entry("should allow when VM specifies no availability zone, there are no availability zones or zones", + testParams{ + setup: func(ctx *unitValidatingWebhookContext) { + delete(ctx.vm.Labels, topology.KubernetesTopologyZoneLabelKey) + Expect(ctx.Client.Delete(ctx, builder.DummyAvailabilityZone())).To(Succeed()) + Expect(ctx.Client.Delete(ctx, builder.DummyZone(dummyNamespaceName))).To(Succeed()) + }, + expectAllowed: true, + }, + ), + Entry("should allow when VM specifies valid availability zone, there are availability zones and zones", + testParams{ + setup: func(ctx *unitValidatingWebhookContext) { + zoneName := builder.DummyZoneName + ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = zoneName + }, + expectAllowed: true, + }, + ), + Entry("when Workload Domain 
Isolation FSS disabled, should allow when VM specifies valid availability zone, there are availability zones but no zones", + testParams{ + setup: func(ctx *unitValidatingWebhookContext) { + pkgcfg.SetContext(ctx, func(config *pkgcfg.Config) { + config.Features.WorkloadDomainIsolation = false + }) + zoneName := builder.DummyZoneName + ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = zoneName + Expect(ctx.Client.Delete(ctx, builder.DummyZone(dummyNamespaceName))).To(Succeed()) + }, + expectAllowed: true, + }, + ), + Entry("when Workload Domain Isolation FSS enabled, should deny when VM specifies valid availability zone, there are availability zones but no zones", + testParams{ + setup: func(ctx *unitValidatingWebhookContext) { + zoneName := builder.DummyZoneName + ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = zoneName + Expect(ctx.Client.Delete(ctx, builder.DummyZone(dummyNamespaceName))).To(Succeed()) + }, + expectAllowed: false, + }, + ), + Entry("should deny when VM specifies valid availability zone, there are no availability zones or zones", + testParams{ + setup: func(ctx *unitValidatingWebhookContext) { + zoneName := builder.DummyZoneName + ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = zoneName + Expect(ctx.Client.Delete(ctx, builder.DummyAvailabilityZone())).To(Succeed()) + Expect(ctx.Client.Delete(ctx, builder.DummyZone(dummyNamespaceName))).To(Succeed()) + }, + expectAllowed: false, + }, + ), + Entry("should deny when VM specifies invalid availability zone, there are availability zones and zones", + testParams{ + setup: func(ctx *unitValidatingWebhookContext) { + ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = "invalid" + }, + expectAllowed: false, + }, + ), + Entry("should deny when VM specifies invalid availability zone, there are no availability zones or zones", + testParams{ + setup: func(ctx *unitValidatingWebhookContext) { + ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = "invalid" + Expect(ctx.Client.Delete(ctx, builder.DummyAvailabilityZone())).To(Succeed()) + Expect(ctx.Client.Delete(ctx, builder.DummyZone(dummyNamespaceName))).To(Succeed()) + }, + expectAllowed: false, + }, + ), + ) + }) + DescribeTable( "spec.className", doTest, @@ -2111,11 +2177,11 @@ func unitTestsValidateUpdate() { ctx.vm.Spec.Reserved.ResourcePolicyName = "policy" + updateSuffix } if args.assignZoneName { - ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = builder.DummyAvailabilityZoneName + ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = builder.DummyZoneName } if args.changeZoneName { - ctx.oldVM.Labels[topology.KubernetesTopologyZoneLabelKey] = builder.DummyAvailabilityZoneName - ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = builder.DummyAvailabilityZoneName + updateSuffix + ctx.oldVM.Labels[topology.KubernetesTopologyZoneLabelKey] = builder.DummyZoneName + ctx.vm.Labels[topology.KubernetesTopologyZoneLabelKey] = builder.DummyZoneName + updateSuffix } if args.withInstanceStorageVolumes {
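
A few illustrative sketches follow; they are not part of the patch itself. First, the shape of the new namespaced Zone API: this builds a Zone the same way the new test builders in this change do, with one VSphereEntityInfo per entity and only ManagedVMs consumed by VM placement. The zoneexample package name and all literal values are invented for illustration.

// Package zoneexample is a hypothetical package used only for these sketches.
package zoneexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	topologyv1 "github.com/vmware-tanzu/vm-operator/external/tanzu-topology/api/v1alpha1"
)

// newManagedVMsZone builds a namespaced Zone whose ManagedVMs entity carries
// the ResourcePool and Folder MoIDs that VM placement now reads.
func newManagedVMsZone(namespace, name, poolMoID, folderMoID string) *topologyv1.Zone {
	return &topologyv1.Zone{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace, // Zone is namespace-scoped, unlike AvailabilityZone.
			Name:      name,
		},
		Spec: topologyv1.ZoneSpec{
			// Namespace and VSpherePods describe the other per-namespace
			// entities; placement only consults ManagedVMs.
			ManagedVMs: topologyv1.VSphereEntityInfo{
				PoolMoIDs:  []string{poolMoID},
				FolderMoID: folderMoID,
			},
		},
	}
}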
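The same feature switch gates every lookup path touched here: when WorkloadDomainIsolation is enabled the namespaced Zone CRs are read, otherwise the cluster-scoped AvailabilityZone CRs are used. A minimal sketch of that branch, assuming a context carrying the pkg/config state and a controller-runtime client; the helper name candidatePoolMoIDs is invented, and the AvailabilityZone field names are taken from the pre-existing API.

package zoneexample

import (
	"context"

	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config"
	"github.com/vmware-tanzu/vm-operator/pkg/topology"
)

// candidatePoolMoIDs returns zoneName -> ResourcePool MoIDs, keyed the same
// way getPlacementCandidates builds its candidate map.
func candidatePoolMoIDs(
	ctx context.Context,
	c ctrlclient.Client,
	namespace string) (map[string][]string, error) {

	candidates := map[string][]string{}

	if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation {
		zones, err := topology.GetZones(ctx, c, namespace)
		if err != nil {
			return nil, err
		}
		for _, zone := range zones {
			// Placement only looks at the ManagedVMs entity of each Zone.
			candidates[zone.Name] = zone.Spec.ManagedVMs.PoolMoIDs
		}
		return candidates, nil
	}

	azs, err := topology.GetAvailabilityZones(ctx, c)
	if err != nil {
		return nil, err
	}
	for _, az := range azs {
		if nsInfo, ok := az.Spec.Namespaces[namespace]; ok {
			candidates[az.Name] = nsInfo.PoolMoIDs
		}
	}
	return candidates, nil
}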
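GetZones returns the new ErrNoZones sentinel rather than an empty slice, and the aggregating helpers (GetNamespaceFolderAndRPMoIDs, GetNamespaceFolderMoID) deliberately treat it as non-fatal. A sketch of that contract from a caller's side; the helper name is invented.

package zoneexample

import (
	"context"
	"errors"

	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/vm-operator/pkg/topology"
)

// folderMoIDOrEmpty returns the namespace Folder MoID if any Zone exists,
// treating an empty namespace as a non-error.
func folderMoIDOrEmpty(ctx context.Context, c ctrlclient.Client, namespace string) (string, error) {
	zones, err := topology.GetZones(ctx, c, namespace)
	if err != nil {
		if errors.Is(err, topology.ErrNoZones) {
			// No Zones yet in this namespace: not fatal for callers that
			// only need a best-effort Folder MoID.
			return "", nil
		}
		return "", err
	}
	// The Folder is VC-scoped, so the first Zone's ManagedVMs entry suffices.
	return zones[0].Spec.ManagedVMs.FolderMoID, nil
}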
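Lastly, the validating webhook now resolves the VM's topology zone label against a namespaced Zone when the feature is enabled, and still requires a matching AvailabilityZone of the same name. This is a condensed, hypothetical restatement of that check, not the webhook code itself.

package zoneexample

import (
	"context"

	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config"
	"github.com/vmware-tanzu/vm-operator/pkg/topology"
)

// zoneLabelResolves mirrors the validator's new behavior: the topology label,
// if set, must name a Zone in the workload's namespace when
// WorkloadDomainIsolation is enabled, and must always name an AvailabilityZone.
func zoneLabelResolves(ctx context.Context, c ctrlclient.Client, labels map[string]string, namespace string) error {
	zoneName := labels[topology.KubernetesTopologyZoneLabelKey]
	if zoneName == "" {
		return nil // No zone requested; nothing to validate.
	}
	if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation {
		if _, err := topology.GetZone(ctx, c, zoneName, namespace); err != nil {
			return err
		}
	}
	_, err := topology.GetAvailabilityZone(ctx, c, zoneName)
	return err
}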