From f4e7850c066dbf2bcc7e8c0ef39e7a53bad2222a Mon Sep 17 00:00:00 2001 From: Sam Lucidi Date: Thu, 5 Dec 2024 08:58:29 -0500 Subject: [PATCH] Allow setting storage mappings for OVA disks * Exposes OVA appliance disks as mappable storage in the OVA inventory collector. * Permits mapping any or all OVA appliance disks to their own destination storage classes. * Any disks that are not specifically mapped are assigned to a default storage class, which must be given a storage mapping. Signed-off-by: Sam Lucidi --- pkg/controller/plan/adapter/ova/builder.go | 108 +++++++++++++----- pkg/controller/plan/adapter/ova/validator.go | 23 +++- .../provider/container/ova/model.go | 91 +++++++++++++-- pkg/controller/provider/web/ova/client.go | 1 + tests/suit/framework/ova.go | 4 +- tests/suit/ova_test.go | 1 - 6 files changed, 182 insertions(+), 46 deletions(-) diff --git a/pkg/controller/plan/adapter/ova/builder.go b/pkg/controller/plan/adapter/ova/builder.go index db442ffd2..2f2ca3c2b 100644 --- a/pkg/controller/plan/adapter/ova/builder.go +++ b/pkg/controller/plan/adapter/ova/builder.go @@ -10,6 +10,7 @@ import ( "strconv" "strings" + "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan" "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref" planbase "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/base" @@ -61,6 +62,11 @@ const ( Unknown = "unknown" ) +// Default Storage +const ( + DefaultStorageID = "default" +) + // Regex which matches the snapshot identifier suffix of a // OVA disk backing file. var backingFilePattern = regexp.MustCompile(`-\d\d\d\d\d\d.vmdk`) @@ -154,41 +160,46 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config return } - // For OVA provider we are assuming a single storage mapping. - dsMapIn := r.Context.Map.Storage.Spec.Map - for _, mapped := range dsMapIn { + var defaultMapping *v1beta1.DestinationStorage + mappedDiskIds := make(map[string]bool) + storageMapIn := r.Context.Map.Storage.Spec.Map + for i := range storageMapIn { + mapped := &storageMapIn[i] + ref := mapped.Source + storage := &model.Storage{} + fErr := r.Source.Inventory.Find(storage, ref) + if fErr != nil { + err = fErr + return + } + if storage.ID == DefaultStorageID { + defaultMapping = &mapped.Destination + continue + } for _, disk := range vm.Disks { - diskSize, err := getResourceCapacity(disk.Capacity, disk.CapacityAllocationUnits) - if err != nil { - return nil, err - } - storageClass := mapped.Destination.StorageClass - dvSource := cdi.DataVolumeSource{ - Blank: &cdi.DataVolumeBlankImage{}, - } - dvSpec := cdi.DataVolumeSpec{ - Source: &dvSource, - Storage: &cdi.StorageSpec{ - Resources: core.ResourceRequirements{ - Requests: core.ResourceList{ - core.ResourceStorage: *resource.NewQuantity(diskSize, resource.BinarySI), - }, - }, - StorageClassName: &storageClass, - }, + if disk.ID == storage.ID { + var dv *cdi.DataVolume + dv, err = r.mapDataVolume(disk, mapped.Destination, dvTemplate) + if err != nil { + return + } + dvs = append(dvs, *dv) + mappedDiskIds[disk.ID] = true } - // set the access mode and volume mode if they were specified in the storage map. - // otherwise, let the storage profile decide the default values. 
- if mapped.Destination.AccessMode != "" { - dvSpec.Storage.AccessModes = []core.PersistentVolumeAccessMode{mapped.Destination.AccessMode} + } + } + + for _, disk := range vm.Disks { + if !mappedDiskIds[disk.ID] { + if defaultMapping == nil { + err = liberr.New("VM has unmapped disks and no default disk mapping is set.", "vm", vm.ID) + return } - if mapped.Destination.VolumeMode != "" { - dvSpec.Storage.VolumeMode = &mapped.Destination.VolumeMode + var dv *cdi.DataVolume + dv, err = r.mapDataVolume(disk, *defaultMapping, dvTemplate) + if err != nil { + return } - - dv := dvTemplate.DeepCopy() - dv.Spec = dvSpec - updateDataVolumeAnnotations(dv, &disk) dvs = append(dvs, *dv) } } @@ -196,6 +207,41 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config return } +func (r *Builder) mapDataVolume(disk ova.Disk, destination v1beta1.DestinationStorage, dvTemplate *cdi.DataVolume) (dv *cdi.DataVolume, err error) { + diskSize, err := getResourceCapacity(disk.Capacity, disk.CapacityAllocationUnits) + if err != nil { + return + } + storageClass := destination.StorageClass + dvSource := cdi.DataVolumeSource{ + Blank: &cdi.DataVolumeBlankImage{}, + } + dvSpec := cdi.DataVolumeSpec{ + Source: &dvSource, + Storage: &cdi.StorageSpec{ + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceStorage: *resource.NewQuantity(diskSize, resource.BinarySI), + }, + }, + StorageClassName: &storageClass, + }, + } + // set the access mode and volume mode if they were specified in the storage map. + // otherwise, let the storage profile decide the default values. + if destination.AccessMode != "" { + dvSpec.Storage.AccessModes = []core.PersistentVolumeAccessMode{destination.AccessMode} + } + if destination.VolumeMode != "" { + dvSpec.Storage.VolumeMode = &destination.VolumeMode + } + + dv = dvTemplate.DeepCopy() + dv.Spec = dvSpec + updateDataVolumeAnnotations(dv, &disk) + return +} + func updateDataVolumeAnnotations(dv *cdi.DataVolume, disk *ova.Disk) { if dv.ObjectMeta.Annotations == nil { dv.ObjectMeta.Annotations = make(map[string]string) diff --git a/pkg/controller/plan/adapter/ova/validator.go b/pkg/controller/plan/adapter/ova/validator.go index 565f7eacb..4e99335dd 100644 --- a/pkg/controller/plan/adapter/ova/validator.go +++ b/pkg/controller/plan/adapter/ova/validator.go @@ -84,8 +84,27 @@ func (r *Validator) PodNetwork(vmRef ref.Ref) (ok bool, err error) { // Validate that a VM's disk backing storage has been mapped. func (r *Validator) StorageMapped(vmRef ref.Ref) (ok bool, err error) { - //For OVA providers, we don't have an actual storage connected, - // since we use a dummy storage for mapping the function should always return true. + if r.plan.Referenced.Map.Storage == nil { + return + } + vm := &model.VM{} + err = r.inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap(err, "vm", vmRef.String()) + return + } + + // If a default mapping is defined, that satisfies the requirement. 
+ if r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: DefaultStorageID}) { + ok = true + return + } + + for _, disk := range vm.Disks { + if !r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: disk.ID}) { + return + } + } ok = true return } diff --git a/pkg/controller/provider/container/ova/model.go b/pkg/controller/provider/container/ova/model.go index 651733334..14d75d0a8 100644 --- a/pkg/controller/provider/container/ova/model.go +++ b/pkg/controller/provider/container/ova/model.go @@ -3,7 +3,6 @@ package ova import ( "context" "errors" - "fmt" api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" model "github.com/konveyor/forklift-controller/pkg/controller/provider/model/ova" @@ -12,6 +11,12 @@ import ( "github.com/konveyor/forklift-controller/pkg/lib/logging" ) +// Default Storage +const ( + DefaultStorageID = "default" + DefaultStorageName = "Default" +) + // All adapters. var adapterList []Adapter @@ -356,33 +361,99 @@ type StorageAdapter struct { } func (r *StorageAdapter) GetUpdates(ctx *Context) (updates []Updater, err error) { + disks := []Disk{} + err = ctx.client.list("disks", &disks) + if err != nil { + return + } + for i := range disks { + disk := &disks[i] + updater := func(tx *libmodel.Tx) (err error) { + m := &model.Storage{ + Base: model.Base{ + ID: disk.ID, + }, + } + err = tx.Get(m) + if err != nil { + if errors.Is(err, libmodel.NotFound) { + m.Name = disk.Name + err = tx.Insert(m) + } + return + } + m.Name = disk.Name + err = tx.Update(m) + return + } + updates = append(updates, updater) + } return } // List the collection. func (r *StorageAdapter) List(ctx *Context, provider *api.Provider) (itr fb.Iterator, err error) { - storageName := fmt.Sprintf("Dummy storage for source provider %s", provider.Name) - dummyStorge := Storage{ - Name: storageName, - ID: string(provider.UID), + diskList := []Disk{} + err = ctx.client.list("disks", &diskList) + if err != nil { + return } list := fb.NewList() m := &model.Storage{ Base: model.Base{ - ID: dummyStorge.ID, - Name: dummyStorge.Name, + ID: DefaultStorageID, + Name: DefaultStorageName, }, } - dummyStorge.ApplyTo(m) list.Append(m) + for _, object := range diskList { + m := &model.Storage{ + Base: model.Base{ + ID: object.ID, + Name: object.Name, + }, + } + list.Append(m) + } + itr = list.Iter() return } func (r *StorageAdapter) DeleteUnexisting(ctx *Context) (deletions []Updater, err error) { - // Each provider have only one storage hence it can't be changed, - // Will be removed only if the provider deleted. 
+ storageList := []model.Storage{} + err = ctx.db.List(&storageList, libmodel.FilterOptions{}) + if err != nil { + if errors.Is(err, libmodel.NotFound) { + err = nil + } + return + } + inventory := make(map[string]bool) + for _, storage := range storageList { + inventory[storage.ID] = true + } + disks := []Disk{} + err = ctx.client.list("disks", &disks) + if err != nil { + return + } + gone := []string{} + for _, disk := range disks { + if _, found := inventory[disk.ID]; !found { + gone = append(gone, disk.ID) + } + } + for _, id := range gone { + updater := func(tx *libmodel.Tx) (err error) { + m := &model.Storage{ + Base: model.Base{ID: id}, + } + return tx.Delete(m) + } + deletions = append(deletions, updater) + } return } diff --git a/pkg/controller/provider/web/ova/client.go b/pkg/controller/provider/web/ova/client.go index 0756dd52a..75c2b6bbc 100644 --- a/pkg/controller/provider/web/ova/client.go +++ b/pkg/controller/provider/web/ova/client.go @@ -298,6 +298,7 @@ func (r *Finder) Storage(ref *base.Ref) (object interface{}, err error) { storage := &Storage{} err = r.ByRef(storage, *ref) if err == nil { + ref.ID = storage.ID ref.Name = storage.Name object = storage } diff --git a/tests/suit/framework/ova.go b/tests/suit/framework/ova.go index b0a068bb9..46009d3f5 100644 --- a/tests/suit/framework/ova.go +++ b/tests/suit/framework/ova.go @@ -19,7 +19,7 @@ func (r *OvaClient) LoadSourceDetails() (vm *OvaVM, err error) { r.vmData.testVMId = "c5686650854d1e69b4123f4bf2e70fe1ed2a" r.vmData.testNetworkID = "ae1badc8c693926f492a01e2f357d6af321b" - r.vmData.testStorageName = "Dummy storage for source provider ova-provider" + r.vmData.testStorageName = "centos44_new-disk1.vmdk" return &r.vmData, nil } @@ -39,7 +39,7 @@ func (r *OvaClient) GetNfsServerForOva(k8sClient *kubernetes.Clientset) (string, } nfsShare := server + ":" + share if nfsShare == "" { - return "", errors.New("failed to fatch NFS settings") + return "", errors.New("failed to fetch NFS settings") } r.nfsPath = nfsShare diff --git a/tests/suit/ova_test.go b/tests/suit/ova_test.go index 0a2b2593d..0b2cb53a3 100644 --- a/tests/suit/ova_test.go +++ b/tests/suit/ova_test.go @@ -80,6 +80,5 @@ var _ = Describe("[level:component]Migration tests for OVA provider", func() { Expect(err).ToNot(HaveOccurred()) err = utils.WaitForMigrationSucceededWithTimeout(f.CrClient, provider.Namespace, test_migration_name, 900*time.Second) Expect(err).ToNot(HaveOccurred()) - }) })
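
Reviewer note (not part of the patch): below is a minimal, self-contained Go sketch of the disk-to-storage-class resolution that the new DataVolumes code performs: explicitly mapped disks keep their own destination, any remaining disks fall back to the "default" mapping, and unmapped disks without a default mapping are an error. The type and function names here are simplified stand-ins for illustration only, not the actual forklift API types (v1beta1.DestinationStorage, ova.Disk).

    package main

    import (
        "errors"
        "fmt"
    )

    // Destination is a simplified stand-in for the storage map destination
    // (in the patch this carries StorageClass, AccessMode, and VolumeMode).
    type Destination struct {
        StorageClass string
    }

    // Disk is a simplified stand-in for an OVA appliance disk in the inventory.
    type Disk struct {
        ID   string
        Name string
    }

    // resolveDestinations mirrors the fallback logic added to Builder.DataVolumes:
    // disks with an explicit mapping keep it, remaining disks use the "default"
    // mapping, and unmapped disks without a default mapping are an error.
    func resolveDestinations(disks []Disk, mappings map[string]Destination) (map[string]Destination, error) {
        const defaultStorageID = "default"

        defaultDest, hasDefault := mappings[defaultStorageID]
        resolved := make(map[string]Destination, len(disks))
        for _, disk := range disks {
            if dest, ok := mappings[disk.ID]; ok {
                // Disk is explicitly mapped to its own storage class.
                resolved[disk.ID] = dest
                continue
            }
            if !hasDefault {
                return nil, errors.New("VM has unmapped disks and no default disk mapping is set")
            }
            // Fall back to the default destination.
            resolved[disk.ID] = defaultDest
        }
        return resolved, nil
    }

    func main() {
        disks := []Disk{
            {ID: "disk-1", Name: "centos44_new-disk1.vmdk"},
            {ID: "disk-2", Name: "centos44_new-disk2.vmdk"},
        }
        mappings := map[string]Destination{
            "disk-1":  {StorageClass: "fast-ssd"},
            "default": {StorageClass: "standard"},
        }
        resolved, err := resolveDestinations(disks, mappings)
        if err != nil {
            panic(err)
        }
        for id, dest := range resolved {
            fmt.Printf("%s -> %s\n", id, dest.StorageClass)
        }
    }

The validator change follows the same rule: StorageMapped succeeds when a "default" mapping exists, or when every disk ID on the VM appears in the storage map references.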