diff --git a/api/Taskfile.dist.yaml b/api/Taskfile.dist.yaml index c5ab64b9e8..606fe04e93 100644 --- a/api/Taskfile.dist.yaml +++ b/api/Taskfile.dist.yaml @@ -87,4 +87,4 @@ tasks: - go install -mod=readonly sigs.k8s.io/controller-tools/cmd/controller-gen@v{{ .CONTROLLER_GEN_VERSION }} status: - | - ls $GOPATH/bin/controller-gen + $GOPATH/bin/controller-gen --version | grep -q "v{{ .CONTROLLER_GEN_VERSION }}" diff --git a/api/client/kubeclient/client.go b/api/client/kubeclient/client.go index 98c0bdb5dd..2a8ed7f57f 100644 --- a/api/client/kubeclient/client.go +++ b/api/client/kubeclient/client.go @@ -83,6 +83,8 @@ type VirtualMachineInterface interface { Freeze(ctx context.Context, name string, opts v1alpha2.VirtualMachineFreeze) error Unfreeze(ctx context.Context, name string) error Migrate(ctx context.Context, name string, opts v1alpha2.VirtualMachineMigrate) error + AddVolume(ctx context.Context, name string, opts v1alpha2.VirtualMachineAddVolume) error + RemoveVolume(ctx context.Context, name string, opts v1alpha2.VirtualMachineRemoveVolume) error } type client struct { diff --git a/api/client/kubeclient/vm.go b/api/client/kubeclient/vm.go index 3d2f41eb5c..e30bc4e6ff 100644 --- a/api/client/kubeclient/vm.go +++ b/api/client/kubeclient/vm.go @@ -149,3 +149,27 @@ func (v vm) Migrate(ctx context.Context, name string, opts v1alpha2.VirtualMachi } return v.restClient.Put().AbsPath(path).Body(body).Do(ctx).Error() } + +func (v vm) AddVolume(ctx context.Context, name string, opts v1alpha2.VirtualMachineAddVolume) error { + path := fmt.Sprintf(subresourceURLTpl, v.namespace, v.resource, name, "addvolume") + return v.restClient. + Put(). + AbsPath(path). + Param("name", opts.Name). + Param("volumeKind", opts.VolumeKind). + Param("pvcName", opts.PVCName). + Param("image", opts.Image). + Param("isCdrom", strconv.FormatBool(opts.IsCdrom)). + Do(ctx). 
+ Error() +} + +func (v vm) RemoveVolume(ctx context.Context, name string, opts v1alpha2.VirtualMachineRemoveVolume) error { + path := fmt.Sprintf(subresourceURLTpl, v.namespace, v.resource, name, "removevolume") + return v.restClient. + Put(). + AbsPath(path). + Param("name", opts.Name). + Do(ctx). + Error() +} diff --git a/api/core/v1alpha2/virtual_machine_block_disk_attachment.go b/api/core/v1alpha2/virtual_machine_block_device_attachment.go similarity index 89% rename from api/core/v1alpha2/virtual_machine_block_disk_attachment.go rename to api/core/v1alpha2/virtual_machine_block_device_attachment.go index 287a0bb2e3..912d453958 100644 --- a/api/core/v1alpha2/virtual_machine_block_disk_attachment.go +++ b/api/core/v1alpha2/virtual_machine_block_device_attachment.go @@ -72,6 +72,8 @@ type VirtualMachineBlockDeviceAttachmentStatus struct { type VMBDAObjectRef struct { // The type of the block device. Options are: // * `VirtualDisk` — use `VirtualDisk` as the disk. This type is always mounted in RW mode. + // * `VirtualImage` — use `VirtualImage` as the disk. This type is always mounted in RO mode. + // * `ClusterVirtualImage` - use `ClusterVirtualImage` as the disk. This type is always mounted in RO mode. Kind VMBDAObjectRefKind `json:"kind,omitempty"` // The name of block device to attach. Name string `json:"name,omitempty"` @@ -79,11 +81,13 @@ type VMBDAObjectRef struct { // VMBDAObjectRefKind defines the type of the block device. 
// -// +kubebuilder:validation:Enum={VirtualDisk} +// +kubebuilder:validation:Enum={VirtualDisk,VirtualImage,ClusterVirtualImage} type VMBDAObjectRefKind string const ( - VMBDAObjectRefKindVirtualDisk VMBDAObjectRefKind = "VirtualDisk" + VMBDAObjectRefKindVirtualDisk VMBDAObjectRefKind = "VirtualDisk" + VMBDAObjectRefKindVirtualImage VMBDAObjectRefKind = "VirtualImage" + VMBDAObjectRefKindClusterVirtualImage VMBDAObjectRefKind = "ClusterVirtualImage" ) // BlockDeviceAttachmentPhase defines current status of resource: diff --git a/api/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go b/api/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go index d769f54872..546666405b 100644 --- a/api/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go +++ b/api/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go @@ -2072,7 +2072,7 @@ func schema_virtualization_api_core_v1alpha2_VMBDAObjectRef(ref common.Reference Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ - Description: "The type of the block device. Options are: * `VirtualDisk` — use `VirtualDisk` as the disk. This type is always mounted in RW mode.", + Description: "The type of the block device. Options are: * `VirtualDisk` — use `VirtualDisk` as the disk. This type is always mounted in RW mode. * `VirtualImage` — use `VirtualImage` as the disk. This type is always mounted in RO mode. * `ClusterVirtualImage` - use `ClusterVirtualImage` as the disk. 
This type is always mounted in RO mode.", Type: []string{"string"}, Format: "", }, @@ -5235,7 +5235,43 @@ func schema_virtualization_api_subresources_v1alpha2_VirtualMachineAddVolume(ref Format: "", }, }, + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeKind": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "pvcName": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "isCdrom": { + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, }, + Required: []string{"name", "volumeKind", "pvcName", "image", "isCdrom"}, }, }, } @@ -5402,7 +5438,15 @@ func schema_virtualization_api_subresources_v1alpha2_VirtualMachineRemoveVolume( Format: "", }, }, + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, }, + Required: []string{"name"}, }, }, } diff --git a/api/subresources/types.go b/api/subresources/types.go index 264029e845..c8d84e5c12 100644 --- a/api/subresources/types.go +++ b/api/subresources/types.go @@ -16,7 +16,9 @@ limitations under the License. 
package subresources -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:readonly @@ -51,6 +53,11 @@ type VirtualMachinePortForward struct { type VirtualMachineAddVolume struct { metav1.TypeMeta + Name string + VolumeKind string + PVCName string + Image string + IsCdrom bool } // +genclient @@ -59,6 +66,7 @@ type VirtualMachineAddVolume struct { type VirtualMachineRemoveVolume struct { metav1.TypeMeta + Name string } // +genclient diff --git a/api/subresources/v1alpha2/types.go b/api/subresources/v1alpha2/types.go index 916d7cbada..a4c1a054cb 100644 --- a/api/subresources/v1alpha2/types.go +++ b/api/subresources/v1alpha2/types.go @@ -55,6 +55,11 @@ type VirtualMachinePortForward struct { type VirtualMachineAddVolume struct { metav1.TypeMeta `json:",inline"` + Name string `json:"name"` + VolumeKind string `json:"volumeKind"` + PVCName string `json:"pvcName"` + Image string `json:"image"` + IsCdrom bool `json:"isCdrom"` } // +genclient @@ -64,6 +69,7 @@ type VirtualMachineAddVolume struct { type VirtualMachineRemoveVolume struct { metav1.TypeMeta `json:",inline"` + Name string `json:"name"` } // +genclient diff --git a/api/subresources/v1alpha2/zz_generated.conversion.go b/api/subresources/v1alpha2/zz_generated.conversion.go index 330a5be605..4ed39f59c7 100644 --- a/api/subresources/v1alpha2/zz_generated.conversion.go +++ b/api/subresources/v1alpha2/zz_generated.conversion.go @@ -162,6 +162,11 @@ func RegisterConversions(s *runtime.Scheme) error { } func autoConvert_v1alpha2_VirtualMachineAddVolume_To_subresources_VirtualMachineAddVolume(in *VirtualMachineAddVolume, out *subresources.VirtualMachineAddVolume, s conversion.Scope) error { + out.Name = in.Name + out.VolumeKind = in.VolumeKind + out.PVCName = in.PVCName + out.Image = in.Image + out.IsCdrom = in.IsCdrom return nil } @@ -171,6 +176,11 @@ func Convert_v1alpha2_VirtualMachineAddVolume_To_subresources_VirtualMachineAddV 
} func autoConvert_subresources_VirtualMachineAddVolume_To_v1alpha2_VirtualMachineAddVolume(in *subresources.VirtualMachineAddVolume, out *VirtualMachineAddVolume, s conversion.Scope) error { + out.Name = in.Name + out.VolumeKind = in.VolumeKind + out.PVCName = in.PVCName + out.Image = in.Image + out.IsCdrom = in.IsCdrom return nil } @@ -182,6 +192,41 @@ func Convert_subresources_VirtualMachineAddVolume_To_v1alpha2_VirtualMachineAddV func autoConvert_url_Values_To_v1alpha2_VirtualMachineAddVolume(in *url.Values, out *VirtualMachineAddVolume, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. + if values, ok := map[string][]string(*in)["name"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Name, s); err != nil { + return err + } + } else { + out.Name = "" + } + if values, ok := map[string][]string(*in)["volumeKind"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.VolumeKind, s); err != nil { + return err + } + } else { + out.VolumeKind = "" + } + if values, ok := map[string][]string(*in)["pvcName"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.PVCName, s); err != nil { + return err + } + } else { + out.PVCName = "" + } + if values, ok := map[string][]string(*in)["image"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Image, s); err != nil { + return err + } + } else { + out.Image = "" + } + if values, ok := map[string][]string(*in)["isCdrom"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.IsCdrom, s); err != nil { + return err + } + } else { + out.IsCdrom = false + } return nil } @@ -339,6 +384,7 @@ func Convert_url_Values_To_v1alpha2_VirtualMachinePortForward(in *url.Values, ou } func autoConvert_v1alpha2_VirtualMachineRemoveVolume_To_subresources_VirtualMachineRemoveVolume(in *VirtualMachineRemoveVolume, out 
*subresources.VirtualMachineRemoveVolume, s conversion.Scope) error { + out.Name = in.Name return nil } @@ -348,6 +394,7 @@ func Convert_v1alpha2_VirtualMachineRemoveVolume_To_subresources_VirtualMachineR } func autoConvert_subresources_VirtualMachineRemoveVolume_To_v1alpha2_VirtualMachineRemoveVolume(in *subresources.VirtualMachineRemoveVolume, out *VirtualMachineRemoveVolume, s conversion.Scope) error { + out.Name = in.Name return nil } @@ -359,6 +406,13 @@ func Convert_subresources_VirtualMachineRemoveVolume_To_v1alpha2_VirtualMachineR func autoConvert_url_Values_To_v1alpha2_VirtualMachineRemoveVolume(in *url.Values, out *VirtualMachineRemoveVolume, s conversion.Scope) error { // WARNING: Field TypeMeta does not have json tag, skipping. + if values, ok := map[string][]string(*in)["name"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Name, s); err != nil { + return err + } + } else { + out.Name = "" + } return nil } diff --git a/crds/doc-ru-virtualmachineblockdeviceattachments.yaml b/crds/doc-ru-virtualmachineblockdeviceattachments.yaml index e32506de51..3ee3223472 100644 --- a/crds/doc-ru-virtualmachineblockdeviceattachments.yaml +++ b/crds/doc-ru-virtualmachineblockdeviceattachments.yaml @@ -16,6 +16,8 @@ spec: description: | Тип блочного устройства. Возможны следующие варианты: * `VirtualDisk` — использовать `VirtualDisk` в качестве диска. Этот тип всегда монтируется в режиме RW. + * `VirtualImage` — использовать `VirtualImage` в качестве диска. Этот тип всегда монтируется в режиме RO. + * `ClusterVirtualImage` - использовать `ClusterVirtualImage` в качестве диска. Этот тип всегда монтируется в режиме RO. 
name: description: | Имя блочного устройства diff --git a/crds/virtualmachineblockdeviceattachments.yaml b/crds/virtualmachineblockdeviceattachments.yaml index 953465bc1c..e140fb88d0 100644 --- a/crds/virtualmachineblockdeviceattachments.yaml +++ b/crds/virtualmachineblockdeviceattachments.yaml @@ -71,8 +71,12 @@ spec: description: |- The type of the block device. Options are: * `VirtualDisk` — use `VirtualDisk` as the disk. This type is always mounted in RW mode. + * `VirtualImage` — use `VirtualImage` as the disk. This type is always mounted in RO mode. + * `ClusterVirtualImage` - use `ClusterVirtualImage` as the disk. This type is always mounted in RO mode. enum: - VirtualDisk + - VirtualImage + - ClusterVirtualImage type: string name: description: The name of block device to attach. diff --git a/images/virtualization-artifact/cmd/virtualization-controller/main.go b/images/virtualization-artifact/cmd/virtualization-controller/main.go index b5cf24b014..01b15fb88d 100644 --- a/images/virtualization-artifact/cmd/virtualization-controller/main.go +++ b/images/virtualization-artifact/cmd/virtualization-controller/main.go @@ -257,7 +257,7 @@ func main() { } vmbdaLogger := logger.NewControllerLogger(vmbda.ControllerName, logLevel, logOutput, logDebugVerbosity, logDebugControllerList) - if _, err = vmbda.NewController(ctx, mgr, vmbdaLogger, controllerNamespace); err != nil { + if _, err = vmbda.NewController(ctx, mgr, virtClient, vmbdaLogger, controllerNamespace); err != nil { log.Error(err.Error()) os.Exit(1) } diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/add_volume.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/add_volume.go index 2688425c95..4628a106b3 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/add_volume.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/add_volume.go @@ -18,12 +18,15 @@ package rest import ( "context" + "encoding/json" "fmt" "net/http" "net/url" + 
corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/rest" + virtv1 "kubevirt.io/api/core/v1" "github.com/deckhouse/virtualization-controller/pkg/tls/certmanager" virtlisters "github.com/deckhouse/virtualization/api/client/generated/listers/core/v1alpha2" @@ -61,11 +64,27 @@ func (r AddVolumeREST) Connect(ctx context.Context, name string, opts runtime.Ob if !ok { return nil, fmt.Errorf("invalid options object: %#v", opts) } - location, transport, err := AddVolumeLocation(ctx, r.vmLister, name, addVolumeOpts, r.kubevirt, r.proxyCertManager) + var ( + addVolumePather pather + hooks []mutateRequestHook + ) + + if r.requestFromKubevirt(addVolumeOpts) { + addVolumePather = newKVVMIPather("addvolume") + } else { + addVolumePather = newKVVMPather("addvolume") + h, err := r.genMutateRequestHook(addVolumeOpts) + if err != nil { + return nil, err + } + hooks = append(hooks, h) + } + location, transport, err := AddVolumeLocation(ctx, r.vmLister, name, addVolumeOpts, r.kubevirt, r.proxyCertManager, addVolumePather) if err != nil { return nil, err } - handler := newThrottledUpgradeAwareProxyHandler(location, transport, false, responder, r.kubevirt.ServiceAccount) + handler := newThrottledUpgradeAwareProxyHandler(location, transport, false, responder, r.kubevirt.ServiceAccount, hooks...) 
+ return handler, nil } @@ -79,6 +98,90 @@ func (r AddVolumeREST) ConnectMethods() []string { return []string{http.MethodPut} } +func (r AddVolumeREST) requestFromKubevirt(opts *subresources.VirtualMachineAddVolume) bool { + return opts == nil || (opts.Image == "" && opts.VolumeKind == "" && opts.PVCName == "") +} + +func (r AddVolumeREST) genMutateRequestHook(opts *subresources.VirtualMachineAddVolume) (mutateRequestHook, error) { + var dd virtv1.DiskDevice + if opts.IsCdrom { + dd.CDRom = &virtv1.CDRomTarget{ + Bus: virtv1.DiskBusSCSI, + } + } else { + dd.Disk = &virtv1.DiskTarget{ + Bus: virtv1.DiskBusSCSI, + } + } + + hotplugRequest := AddVolumeOptions{ + Name: opts.Name, + Disk: &virtv1.Disk{ + Name: opts.Name, + DiskDevice: dd, + Serial: opts.Name, + }, + } + switch opts.VolumeKind { + case "VirtualDisk": + if opts.PVCName == "" { + return nil, fmt.Errorf("must specify PVCName") + } + hotplugRequest.VolumeSource = &HotplugVolumeSource{ + PersistentVolumeClaim: &virtv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: opts.PVCName, + }, + Hotpluggable: true, + }, + } + case "VirtualImage": + switch { + case opts.PVCName != "" && opts.Image != "": + return nil, fmt.Errorf("must specify only one of PersistentVolumeClaimName or Image") + case opts.PVCName != "": + hotplugRequest.VolumeSource = &HotplugVolumeSource{ + PersistentVolumeClaim: &virtv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: opts.PVCName, + }, + Hotpluggable: true, + }, + } + case opts.Image != "": + hotplugRequest.VolumeSource = &HotplugVolumeSource{ + ContainerDisk: &ContainerDiskSource{ + Image: opts.Image, + Hotpluggable: true, + }, + } + default: + return nil, fmt.Errorf("must specify one of PersistentVolumeClaimName or Image") + } + case "ClusterVirtualImage": + if opts.Image == "" { + return nil, fmt.Errorf("must specify Image") + } 
+ hotplugRequest.VolumeSource = &HotplugVolumeSource{ + ContainerDisk: &ContainerDiskSource{ + Image: opts.Image, + Hotpluggable: true, + }, + } + default: + return nil, fmt.Errorf("invalid volume kind: %s", opts.VolumeKind) + } + + newBody, err := json.Marshal(&hotplugRequest) + if err != nil { + return nil, err + } + + return func(req *http.Request) error { + return rewriteBody(req, newBody) + }, nil +} + func AddVolumeLocation( ctx context.Context, getter virtlisters.VirtualMachineLister, @@ -86,6 +189,32 @@ func AddVolumeLocation( opts *subresources.VirtualMachineAddVolume, kubevirt KubevirtApiServerConfig, proxyCertManager certmanager.CertificateManager, + addVolumePather pather, ) (*url.URL, *http.Transport, error) { - return streamLocation(ctx, getter, name, opts, newKVVMIPather("addvolume"), kubevirt, proxyCertManager) + return streamLocation(ctx, getter, name, opts, addVolumePather, kubevirt, proxyCertManager) +} + +type VirtualMachineVolumeRequest struct { + AddVolumeOptions *AddVolumeOptions `json:"addVolumeOptions,omitempty" optional:"true"` + RemoveVolumeOptions *virtv1.RemoveVolumeOptions `json:"removeVolumeOptions,omitempty" optional:"true"` +} +type AddVolumeOptions struct { + Name string `json:"name"` + Disk *virtv1.Disk `json:"disk"` + VolumeSource *HotplugVolumeSource `json:"volumeSource"` + DryRun []string `json:"dryRun,omitempty"` +} + +type HotplugVolumeSource struct { + PersistentVolumeClaim *virtv1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + DataVolume *virtv1.DataVolumeSource `json:"dataVolume,omitempty"` + ContainerDisk *ContainerDiskSource `json:"containerDisk,omitempty"` +} + +type ContainerDiskSource struct { + Image string `json:"image"` + ImagePullSecret string `json:"imagePullSecret,omitempty"` + Path string `json:"path,omitempty"` + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + Hotpluggable bool `json:"hotpluggable,omitempty"` } diff --git 
a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/remove_volume.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/remove_volume.go index d3c665113b..f279a4ecb2 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/remove_volume.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/remove_volume.go @@ -18,12 +18,14 @@ package rest import ( "context" + "encoding/json" "fmt" "net/http" "net/url" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/rest" + virtv1 "kubevirt.io/api/core/v1" "github.com/deckhouse/virtualization-controller/pkg/tls/certmanager" virtlisters "github.com/deckhouse/virtualization/api/client/generated/listers/core/v1alpha2" @@ -61,11 +63,27 @@ func (r RemoveVolumeREST) Connect(ctx context.Context, name string, opts runtime if !ok { return nil, fmt.Errorf("invalid options object: %#v", opts) } - location, transport, err := RemoveVolumeRESTLocation(ctx, r.vmLister, name, removeVolumeOpts, r.kubevirt, r.proxyCertManager) + var ( + removeVolumePather pather + hooks []mutateRequestHook + ) + + if r.requestFromKubevirt(removeVolumeOpts) { + removeVolumePather = newKVVMIPather("removevolume") + } else { + removeVolumePather = newKVVMPather("removevolume") + h, err := r.genMutateRequestHook(removeVolumeOpts) + if err != nil { + return nil, err + } + hooks = append(hooks, h) + } + + location, transport, err := RemoveVolumeRESTLocation(ctx, r.vmLister, name, removeVolumeOpts, r.kubevirt, r.proxyCertManager, removeVolumePather) if err != nil { return nil, err } - handler := newThrottledUpgradeAwareProxyHandler(location, transport, false, responder, r.kubevirt.ServiceAccount) + handler := newThrottledUpgradeAwareProxyHandler(location, transport, false, responder, r.kubevirt.ServiceAccount, hooks...) 
return handler, nil } @@ -79,6 +97,25 @@ func (r RemoveVolumeREST) ConnectMethods() []string { return []string{http.MethodPut} } +func (r RemoveVolumeREST) requestFromKubevirt(opts *subresources.VirtualMachineRemoveVolume) bool { + return opts == nil || opts.Name == "" +} + +func (r RemoveVolumeREST) genMutateRequestHook(opts *subresources.VirtualMachineRemoveVolume) (mutateRequestHook, error) { + unplugRequest := virtv1.RemoveVolumeOptions{ + Name: opts.Name, + } + + newBody, err := json.Marshal(&unplugRequest) + if err != nil { + return nil, err + } + + return func(req *http.Request) error { + return rewriteBody(req, newBody) + }, nil +} + func RemoveVolumeRESTLocation( ctx context.Context, getter virtlisters.VirtualMachineLister, @@ -86,6 +123,7 @@ func RemoveVolumeRESTLocation( opts *subresources.VirtualMachineRemoveVolume, kubevirt KubevirtApiServerConfig, proxyCertManager certmanager.CertificateManager, + removeVolumePather pather, ) (*url.URL, *http.Transport, error) { - return streamLocation(ctx, getter, name, opts, newKVVMIPather("removevolume"), kubevirt, proxyCertManager) + return streamLocation(ctx, getter, name, opts, removeVolumePather, kubevirt, proxyCertManager) } diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go index fe239be6f1..ea294f9ae1 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go @@ -17,10 +17,12 @@ limitations under the License. 
package rest import ( + "bytes" "context" "crypto/tls" "crypto/x509" "fmt" + "io" "net/http" "net/url" "os" @@ -143,18 +145,42 @@ func streamParams(_ url.Values, opts runtime.Object) error { } } +type mutateRequestHook func(req *http.Request) error + func newThrottledUpgradeAwareProxyHandler( location *url.URL, transport *http.Transport, upgradeRequired bool, responder rest.Responder, sa types.NamespacedName, + mutateHooks ...mutateRequestHook, ) http.Handler { var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { r.Header.Add(userHeader, fmt.Sprintf("system:serviceaccount:%s:%s", sa.Namespace, sa.Name)) r.Header.Add(groupHeader, "system:serviceaccounts") + for _, hook := range mutateHooks { + if hook != nil { + if err := hook(r); err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + } + } proxyHandler := proxy.NewUpgradeAwareHandler(location, transport, false, upgradeRequired, proxy.NewErrorResponder(responder)) proxyHandler.ServeHTTP(w, r) } return handler } + +func rewriteBody(req *http.Request, newBody []byte) error { + if req.Body != nil { + err := req.Body.Close() + if err != nil { + return err + } + } + req.Body = io.NopCloser(bytes.NewBuffer(newBody)) + req.ContentLength = int64(len(newBody)) + return nil +} diff --git a/images/virtualization-artifact/pkg/controller/kvapi/kvapi.go b/images/virtualization-artifact/pkg/controller/kvapi/kvapi.go index 1ecdb05025..3594058648 100644 --- a/images/virtualization-artifact/pkg/controller/kvapi/kvapi.go +++ b/images/virtualization-artifact/pkg/controller/kvapi/kvapi.go @@ -32,6 +32,7 @@ type Kubevirt interface { HotplugVolumesEnabled() bool } +// Deprecated: use virt client. func New(cli client.Client, kv Kubevirt) *KvApi { return &KvApi{ Client: cli, @@ -39,15 +40,18 @@ func New(cli client.Client, kv Kubevirt) *KvApi { } } +// Deprecated: use virt client. 
type KvApi struct { client.Client kubevirt Kubevirt } +// Deprecated: use virt client. func (api *KvApi) AddVolume(ctx context.Context, kvvm *virtv1.VirtualMachine, opts *virtv1.AddVolumeOptions) error { return api.addVolume(ctx, kvvm, opts) } +// Deprecated: use virt client. func (api *KvApi) RemoveVolume(ctx context.Context, kvvm *virtv1.VirtualMachine, opts *virtv1.RemoveVolumeOptions) error { return api.removeVolume(ctx, kvvm, opts) } @@ -187,6 +191,7 @@ func volumeNameExists(volume virtv1.Volume, volumeName string) bool { } func volumeSourceExists(volume virtv1.Volume, volumeName string) bool { + // Do not add ContainerDisk!!! return (volume.DataVolume != nil && volume.DataVolume.Name == volumeName) || (volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == volumeName) } diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go index ee8798b320..9f2ed0aef0 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go @@ -50,8 +50,13 @@ func GenerateCVMIDiskName(name string) string { } func GetOriginalDiskName(prefixedName string) (string, bool) { - if strings.HasPrefix(prefixedName, VMDDiskPrefix) { + switch { + case strings.HasPrefix(prefixedName, VMDDiskPrefix): return strings.TrimPrefix(prefixedName, VMDDiskPrefix), true + case strings.HasPrefix(prefixedName, VMIDiskPrefix): + return strings.TrimPrefix(prefixedName, VMIDiskPrefix), true + case strings.HasPrefix(prefixedName, CVMIDiskPrefix): + return strings.TrimPrefix(prefixedName, CVMIDiskPrefix), true } return prefixedName, false diff --git a/images/virtualization-artifact/pkg/controller/service/attachment_service.go b/images/virtualization-artifact/pkg/controller/service/attachment_service.go index b9627a2732..173b467e6a 100644 --- 
a/images/virtualization-artifact/pkg/controller/service/attachment_service.go +++ b/images/virtualization-artifact/pkg/controller/service/attachment_service.go @@ -28,34 +28,36 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common/object" - "github.com/deckhouse/virtualization-controller/pkg/controller/kubevirt" "github.com/deckhouse/virtualization-controller/pkg/controller/kvapi" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" + "github.com/deckhouse/virtualization/api/client/kubeclient" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) type AttachmentService struct { client Client + virtClient kubeclient.Client controllerNamespace string } -func NewAttachmentService(client Client, controllerNamespace string) *AttachmentService { +func NewAttachmentService(client Client, virtClient kubeclient.Client, controllerNamespace string) *AttachmentService { return &AttachmentService{ client: client, + virtClient: virtClient, controllerNamespace: controllerNamespace, } } var ( - ErrVolumeStatusNotReady = errors.New("hotplug is not ready") - ErrDiskIsSpecAttached = errors.New("virtual disk is already attached to the virtual machine spec") - ErrHotPlugRequestAlreadySent = errors.New("attachment request is already sent") - ErrVirtualMachineWaitsForRestartApproval = errors.New("virtual machine waits for restart approval") + ErrVolumeStatusNotReady = errors.New("hotplug is not ready") + ErrBlockDeviceIsSpecAttached = errors.New("block device is already attached to the virtual machine spec") + ErrHotPlugRequestAlreadySent = errors.New("attachment request is already sent") ) -func (s AttachmentService) IsHotPlugged(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) (bool, error) { - if vd == nil { - return false, errors.New("cannot check if a nil VirtualDisk is hot plugged") 
+func (s AttachmentService) IsHotPlugged(ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) (bool, error) { + if ad == nil { + return false, errors.New("cannot check if an empty AttachmentDisk is hot plugged") } if vm == nil { @@ -67,7 +69,7 @@ func (s AttachmentService) IsHotPlugged(vd *virtv2.VirtualDisk, vm *virtv2.Virtu } for _, vs := range kvvmi.Status.VolumeStatus { - if vs.HotplugVolume != nil && vs.Name == kvbuilder.GenerateVMDDiskName(vd.Name) { + if vs.HotplugVolume != nil && vs.Name == ad.GenerateName { if vs.Phase == virtv1.VolumeReady { return true, nil } @@ -79,9 +81,9 @@ func (s AttachmentService) IsHotPlugged(vd *virtv2.VirtualDisk, vm *virtv2.Virtu return false, nil } -func (s AttachmentService) CanHotPlug(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) (bool, error) { - if vd == nil { - return false, errors.New("cannot hot plug a nil VirtualDisk") +func (s AttachmentService) CanHotPlug(ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) (bool, error) { + if ad == nil { + return false, errors.New("cannot hot plug a nil AttachmentDisk") } if vm == nil { @@ -93,12 +95,12 @@ func (s AttachmentService) CanHotPlug(vd *virtv2.VirtualDisk, vm *virtv2.Virtual } for _, bdr := range vm.Spec.BlockDeviceRefs { - if bdr.Kind == virtv2.DiskDevice && bdr.Name == vd.Name { - return false, fmt.Errorf("%w: virtual machine has a virtual disk reference, but it is not a hot-plugged volume", ErrDiskIsSpecAttached) + if bdr.Kind == ad.Kind && bdr.Name == ad.Name { + return false, fmt.Errorf("%w: virtual machine has a block device reference, but it is not a hot-plugged volume", ErrBlockDeviceIsSpecAttached) } } - name := kvbuilder.GenerateVMDDiskName(vd.Name) + name := ad.GenerateName if kvvm.Spec.Template != nil { for _, vs := range kvvm.Spec.Template.Spec.Volumes { @@ -108,7 +110,7 @@ func (s AttachmentService) CanHotPlug(vd *virtv2.VirtualDisk, vm *virtv2.Virtual } if 
!vs.PersistentVolumeClaim.Hotpluggable { - return false, fmt.Errorf("%w: virtual machine has a virtual disk reference, but it is not a hot-plugged volume", ErrDiskIsSpecAttached) + return false, fmt.Errorf("%w: virtual machine has a block device reference, but it is not a hot-plugged volume", ErrBlockDeviceIsSpecAttached) } return false, ErrHotPlugRequestAlreadySent @@ -122,16 +124,12 @@ func (s AttachmentService) CanHotPlug(vd *virtv2.VirtualDisk, vm *virtv2.Virtual } } - if len(vm.Status.RestartAwaitingChanges) > 0 { - return false, ErrVirtualMachineWaitsForRestartApproval - } - return true, nil } -func (s AttachmentService) HotPlugDisk(ctx context.Context, vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) error { - if vd == nil { - return errors.New("cannot hot plug a nil VirtualDisk") +func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) error { + if ad == nil { + return errors.New("cannot hot plug a nil AttachmentDisk") } if vm == nil { @@ -142,49 +140,22 @@ func (s AttachmentService) HotPlugDisk(ctx context.Context, vd *virtv2.VirtualDi return errors.New("cannot hot plug a disk into a nil KVVM") } - name := kvbuilder.GenerateVMDDiskName(vd.Name) - - hotplugRequest := virtv1.AddVolumeOptions{ - Name: name, - Disk: &virtv1.Disk{ - Name: name, - DiskDevice: virtv1.DiskDevice{ - Disk: &virtv1.DiskTarget{ - Bus: "scsi", - }, - }, - Serial: vd.Name, - }, - VolumeSource: &virtv1.HotplugVolumeSource{ - PersistentVolumeClaim: &virtv1.PersistentVolumeClaimVolumeSource{ - PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: vd.Status.Target.PersistentVolumeClaim, - }, - Hotpluggable: true, - }, - }, - } - - kv, err := kubevirt.New(ctx, s.client, s.controllerNamespace) - if err != nil { - return err - } - - err = kvapi.New(s.client, kv).AddVolume(ctx, kvvm, &hotplugRequest) - if err != nil { - return fmt.Errorf("error 
adding volume, %w", err) - } - - return nil + return s.virtClient.VirtualMachines(vm.GetNamespace()).AddVolume(ctx, vm.GetName(), v1alpha2.VirtualMachineAddVolume{ + VolumeKind: string(ad.Kind), + Name: ad.GenerateName, + Image: ad.Image, + PVCName: ad.PVCName, + IsCdrom: ad.IsCdrom, + }) } -func (s AttachmentService) CanUnplug(vd *virtv2.VirtualDisk, kvvm *virtv1.VirtualMachine) bool { - if vd == nil || kvvm == nil || kvvm.Spec.Template == nil { +func (s AttachmentService) CanUnplug(kvvm *virtv1.VirtualMachine, diskName string) bool { + if diskName == "" || kvvm == nil || kvvm.Spec.Template == nil { return false } for _, volume := range kvvm.Spec.Template.Spec.Volumes { - if kvapi.VolumeExists(volume, kvbuilder.GenerateVMDDiskName(vd.Name)) { + if kvapi.VolumeExists(volume, diskName) { return true } } @@ -192,26 +163,16 @@ func (s AttachmentService) CanUnplug(vd *virtv2.VirtualDisk, kvvm *virtv1.Virtua return false } -func (s AttachmentService) UnplugDisk(ctx context.Context, vd *virtv2.VirtualDisk, kvvm *virtv1.VirtualMachine) error { - if vd == nil || kvvm == nil { - return nil - } - - unplugRequest := virtv1.RemoveVolumeOptions{ - Name: kvbuilder.GenerateVMDDiskName(vd.Name), - } - - kv, err := kubevirt.New(ctx, s.client, s.controllerNamespace) - if err != nil { - return err +func (s AttachmentService) UnplugDisk(ctx context.Context, kvvm *virtv1.VirtualMachine, diskName string) error { + if kvvm == nil { + return errors.New("cannot unplug a disk from a nil KVVM") } - - err = kvapi.New(s.client, kv).RemoveVolume(ctx, kvvm, &unplugRequest) - if err != nil { - return fmt.Errorf("error removing volume, %w", err) + if diskName == "" { + return errors.New("cannot unplug a disk with a empty DiskName") } - - return nil + return s.virtClient.VirtualMachines(kvvm.GetNamespace()).RemoveVolume(ctx, kvvm.GetName(), v1alpha2.VirtualMachineRemoveVolume{ + Name: diskName, + }) } // IsConflictedAttachment returns true if the provided VMBDA conflicts with another @@ -235,6 
+196,11 @@ func (s AttachmentService) UnplugDisk(ctx context.Context, vd *virtv2.VirtualDis // T1: -->VMBDA A Should be Non-Conflicted lexicographically // T1: VMBDA B Phase: "" func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (bool, string, error) { + // CVI and VI always has no conflicts. Skip + if vmbda.Spec.BlockDeviceRef.Kind == virtv2.ClusterVirtualImageKind || vmbda.Spec.BlockDeviceRef.Kind == virtv2.VirtualImageKind { + return false, "", nil + } + var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList err := s.client.List(ctx, &vmbdas, &client.ListOptions{Namespace: vmbda.Namespace}) if err != nil { @@ -272,8 +238,16 @@ func (s AttachmentService) GetVirtualDisk(ctx context.Context, name, namespace s return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualDisk{}) } -func (s AttachmentService) GetPersistentVolumeClaim(ctx context.Context, vd *virtv2.VirtualDisk) (*corev1.PersistentVolumeClaim, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: vd.Namespace, Name: vd.Status.Target.PersistentVolumeClaim}, s.client, &corev1.PersistentVolumeClaim{}) +func (s AttachmentService) GetVirtualImage(ctx context.Context, name, namespace string) (*virtv2.VirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualImage{}) +} + +func (s AttachmentService) GetClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &virtv2.ClusterVirtualImage{}) +} + +func (s AttachmentService) GetPersistentVolumeClaim(ctx context.Context, ad *AttachmentDisk) (*corev1.PersistentVolumeClaim, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: ad.Namespace, Name: ad.PVCName}, s.client, &corev1.PersistentVolumeClaim{}) } func (s 
AttachmentService) GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) { @@ -291,3 +265,51 @@ func (s AttachmentService) GetKVVMI(ctx context.Context, vm *virtv2.VirtualMachi func isSameBlockDeviceRefs(a, b virtv2.VMBDAObjectRef) bool { return a.Kind == b.Kind && a.Name == b.Name } + +type AttachmentDisk struct { + Kind virtv2.BlockDeviceKind + Name string + Namespace string + GenerateName string + PVCName string + Image string + IsCdrom bool +} + +func NewAttachmentDiskFromVirtualDisk(vd *virtv2.VirtualDisk) *AttachmentDisk { + return &AttachmentDisk{ + Kind: virtv2.DiskDevice, + Name: vd.GetName(), + Namespace: vd.GetNamespace(), + GenerateName: kvbuilder.GenerateVMDDiskName(vd.GetName()), + PVCName: vd.Status.Target.PersistentVolumeClaim, + } +} + +func NewAttachmentDiskFromVirtualImage(vi *virtv2.VirtualImage) *AttachmentDisk { + ad := AttachmentDisk{ + Kind: virtv2.ImageDevice, + Name: vi.GetName(), + Namespace: vi.GetNamespace(), + GenerateName: kvbuilder.GenerateVMIDiskName(vi.GetName()), + IsCdrom: vi.Status.CDROM, + } + + if vi.Spec.Storage == virtv2.StorageContainerRegistry { + ad.Image = vi.Status.Target.RegistryURL + } else { + ad.PVCName = vi.Status.Target.PersistentVolumeClaim + } + + return &ad +} + +func NewAttachmentDiskFromClusterVirtualImage(cvi *virtv2.ClusterVirtualImage) *AttachmentDisk { + return &AttachmentDisk{ + Kind: virtv2.ClusterImageDevice, + Name: cvi.GetName(), + GenerateName: kvbuilder.GenerateCVMIDiskName(cvi.GetName()), + Image: cvi.Status.Target.RegistryURL, + IsCdrom: cvi.Status.CDROM, + } +} diff --git a/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go b/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go index e1dc492775..8fa52a3c21 100644 --- a/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go +++ b/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go @@ -75,7 +75,7 
@@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { return nil } - s := NewAttachmentService(clientMock, "") + s := NewAttachmentService(clientMock, nil, "") isConflicted, conflictWithName, err := s.IsConflictedAttachment(context.Background(), vmbdaAlpha) Expect(err).To(BeNil()) Expect(isConflicted).To(BeTrue()) @@ -94,7 +94,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { return nil } - s := NewAttachmentService(clientMock, "") + s := NewAttachmentService(clientMock, nil, "") isConflicted, conflictWithName, err := s.IsConflictedAttachment(context.Background(), vmbdaAlpha) Expect(err).To(BeNil()) Expect(isConflicted).To(BeFalse()) @@ -113,7 +113,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { return nil } - s := NewAttachmentService(clientMock, "") + s := NewAttachmentService(clientMock, nil, "") isConflicted, conflictWithName, err := s.IsConflictedAttachment(context.Background(), vmbdaAlpha) Expect(err).To(BeNil()) Expect(isConflicted).To(BeTrue()) @@ -132,7 +132,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { return nil } - s := NewAttachmentService(clientMock, "") + s := NewAttachmentService(clientMock, nil, "") isConflicted, conflictWithName, err := s.IsConflictedAttachment(context.Background(), vmbdaAlpha) Expect(err).To(BeNil()) Expect(isConflicted).To(BeFalse()) @@ -150,7 +150,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { return nil } - s := NewAttachmentService(clientMock, "") + s := NewAttachmentService(clientMock, nil, "") isConflicted, conflictWithName, err := s.IsConflictedAttachment(context.Background(), vmbdaAlpha) Expect(err).To(BeNil()) Expect(isConflicted).To(BeFalse()) @@ -165,7 +165,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { return nil } - s := NewAttachmentService(clientMock, "") + s := NewAttachmentService(clientMock, nil, "") isConflicted, 
conflictWithName, err := s.IsConflictedAttachment(context.Background(), vmbdaAlpha) Expect(err).To(BeNil()) Expect(isConflicted).To(BeFalse()) diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go index c32aaa808f..9d41c74e41 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go @@ -110,7 +110,8 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu return reconcile.Result{}, nil } - pvc, err := h.attachment.GetPersistentVolumeClaim(ctx, vd) + ad := service.NewAttachmentDiskFromVirtualDisk(vd) + pvc, err := h.attachment.GetPersistentVolumeClaim(ctx, ad) if err != nil { return reconcile.Result{}, err } @@ -131,6 +132,128 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu return reconcile.Result{}, nil } + cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady) + return reconcile.Result{}, nil + case virtv2.VMBDAObjectRefKindVirtualImage: + viKey := types.NamespacedName{ + Name: vmbda.Spec.BlockDeviceRef.Name, + Namespace: vmbda.Namespace, + } + + vi, err := h.attachment.GetVirtualImage(ctx, viKey.Name, viKey.Namespace) + if err != nil { + return reconcile.Result{}, err + } + + if vi == nil { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message(fmt.Sprintf("VirtualImage %q not found.", viKey.String())) + return reconcile.Result{}, nil + } + + if vi.Generation != vi.Status.ObservedGeneration { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message(fmt.Sprintf("Waiting for the VirtualImage %q to be observed in its latest state generation.", viKey.String())) + return reconcile.Result{}, nil + } + + if vi.Status.Phase != virtv2.ImageReady { + cb. 
+ Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message(fmt.Sprintf("VirtualImage %q is not ready to be attached to the virtual machine: waiting for the VirtualImage to be ready for attachment.", viKey.String())) + return reconcile.Result{}, nil + } + switch vi.Spec.Storage { + case virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim: + if vi.Status.Target.PersistentVolumeClaim == "" { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message("Waiting until VirtualImage has associated PersistentVolumeClaim name.") + return reconcile.Result{}, nil + } + ad := service.NewAttachmentDiskFromVirtualImage(vi) + pvc, err := h.attachment.GetPersistentVolumeClaim(ctx, ad) + if err != nil { + return reconcile.Result{}, err + } + + if pvc == nil { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message(fmt.Sprintf("Underlying PersistentVolumeClaim %q not found.", vi.Status.Target.PersistentVolumeClaim)) + return reconcile.Result{}, nil + } + + if vi.Status.Phase == virtv2.ImageReady && pvc.Status.Phase != corev1.ClaimBound { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message(fmt.Sprintf("Underlying PersistentVolumeClaim %q not bound.", vi.Status.Target.PersistentVolumeClaim)) + return reconcile.Result{}, nil + } + + cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady) + + case virtv2.StorageContainerRegistry: + if vi.Status.Target.RegistryURL == "" { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). 
+ Message("Waiting until VirtualImage has associated RegistryUrl.") + return reconcile.Result{}, nil + } + } + + cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady) + return reconcile.Result{}, nil + case virtv2.VMBDAObjectRefKindClusterVirtualImage: + cviKey := types.NamespacedName{ + Name: vmbda.Spec.BlockDeviceRef.Name, + } + + cvi, err := h.attachment.GetClusterVirtualImage(ctx, cviKey.Name) + if err != nil { + return reconcile.Result{}, err + } + + if cvi == nil { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message(fmt.Sprintf("ClusterVirtualImage %q not found.", cviKey.String())) + return reconcile.Result{}, nil + } + if cvi.Generation != cvi.Status.ObservedGeneration { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message(fmt.Sprintf("Waiting for the ClusterVirtualImage %q to be observed in its latest state generation.", cviKey.String())) + return reconcile.Result{}, nil + } + + if cvi.Status.Phase != virtv2.ImageReady { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). + Message(fmt.Sprintf("ClusterVirtualImage %q is not ready to be attached to the virtual machine: waiting for the ClusterVirtualImage to be ready for attachment.", cviKey.String())) + return reconcile.Result{}, nil + } + + if cvi.Status.Target.RegistryURL == "" { + cb. + Status(metav1.ConditionFalse). + Reason(vmbdacondition.BlockDeviceNotReady). 
+ Message("Waiting until ClusterVirtualImage has associated RegistryUrl.") + return reconcile.Result{}, nil + } + cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady) return reconcile.Result{}, nil default: diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go index fec026765e..7cd3eec2de 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go @@ -18,30 +18,85 @@ package internal import ( "context" + "log/slog" + "strings" + "k8s.io/apimachinery/pkg/types" + virtv1 "kubevirt.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/deckhouse/virtualization-controller/pkg/common/object" + "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization-controller/pkg/logger" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" -type DeletionHandler struct{} +type UnplugInterface interface { + CanUnplug(kvvm *virtv1.VirtualMachine, diskName string) bool + UnplugDisk(ctx context.Context, kvvm *virtv1.VirtualMachine, diskName string) error +} +type DeletionHandler struct { + unplug UnplugInterface + client client.Client + + log *slog.Logger +} -func NewDeletionHandler() *DeletionHandler { - return &DeletionHandler{} +func NewDeletionHandler(unplug UnplugInterface, client client.Client) *DeletionHandler { + return &DeletionHandler{ + unplug: unplug, + client: client, + } } -func (h DeletionHandler) Handle(ctx context.Context, vd *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { - log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) - if vd.DeletionTimestamp != nil { - 
log.Info("Deletion observed: remove cleanup finalizer from VirtualMachineBlockDeviceAttachment") - controllerutil.RemoveFinalizer(vd, virtv2.FinalizerVMBDACleanup) +func (h *DeletionHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { + h.log = logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) + + if vmbda.DeletionTimestamp != nil { + if err := h.cleanUp(ctx, vmbda); err != nil { + return reconcile.Result{}, err + } + h.log.Info("Deletion observed: remove cleanup finalizer from VirtualMachineBlockDeviceAttachment") + controllerutil.RemoveFinalizer(vmbda, virtv2.FinalizerVMBDACleanup) return reconcile.Result{}, nil } - controllerutil.AddFinalizer(vd, virtv2.FinalizerVMBDACleanup) + controllerutil.AddFinalizer(vmbda, virtv2.FinalizerVMBDACleanup) return reconcile.Result{}, nil } + +func (h *DeletionHandler) cleanUp(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) error { + if vmbda == nil { + return nil + } + + var diskName string + switch vmbda.Spec.BlockDeviceRef.Kind { + case virtv2.VMBDAObjectRefKindVirtualDisk: + diskName = kvbuilder.GenerateVMDDiskName(vmbda.Spec.BlockDeviceRef.Name) + case virtv2.VMBDAObjectRefKindVirtualImage: + diskName = kvbuilder.GenerateVMIDiskName(vmbda.Spec.BlockDeviceRef.Name) + case virtv2.VMBDAObjectRefKindClusterVirtualImage: + diskName = kvbuilder.GenerateCVMIDiskName(vmbda.Spec.BlockDeviceRef.Name) + } + + kvvm, err := object.FetchObject(ctx, types.NamespacedName{Namespace: vmbda.GetNamespace(), Name: vmbda.Spec.VirtualMachineName}, h.client, &virtv1.VirtualMachine{}) + if err != nil { + return err + } + + if h.unplug.CanUnplug(kvvm, diskName) { + h.log.Info("Unplug Virtual Disk", slog.String("diskName", diskName), slog.String("vm", kvvm.Name)) + if err = h.unplug.UnplugDisk(ctx, kvvm, diskName); err != nil { + if strings.Contains(err.Error(), "does not exist") { + return nil + } + return err + } + } + return nil +} diff --git 
a/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go index 7656b090bb..1776b408d1 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go @@ -54,9 +54,32 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi cb.Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown) } - vd, err := h.attacher.GetVirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name, vmbda.Namespace) - if err != nil { - return reconcile.Result{}, err + var ad *service.AttachmentDisk + switch vmbda.Spec.BlockDeviceRef.Kind { + case virtv2.VMBDAObjectRefKindVirtualDisk: + vd, err := h.attacher.GetVirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name, vmbda.Namespace) + if err != nil { + return reconcile.Result{}, err + } + if vd != nil { + ad = service.NewAttachmentDiskFromVirtualDisk(vd) + } + case virtv2.VMBDAObjectRefKindVirtualImage: + vi, err := h.attacher.GetVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name, vmbda.Namespace) + if err != nil { + return reconcile.Result{}, err + } + if vi != nil { + ad = service.NewAttachmentDiskFromVirtualImage(vi) + } + case virtv2.VMBDAObjectRefKindClusterVirtualImage: + cvi, err := h.attacher.GetClusterVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name) + if err != nil { + return reconcile.Result{}, err + } + if cvi != nil { + ad = service.NewAttachmentDiskFromClusterVirtualImage(cvi) + } } vm, err := h.attacher.GetVirtualMachine(ctx, vmbda.Spec.VirtualMachineName, vmbda.Namespace) @@ -73,18 +96,6 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi } if vmbda.DeletionTimestamp != nil { - switch vmbda.Status.Phase { - case virtv2.BlockDeviceAttachmentPhasePending, - virtv2.BlockDeviceAttachmentPhaseInProgress, - virtv2.BlockDeviceAttachmentPhaseAttached: - if h.attacher.CanUnplug(vd, kvvm) { - err 
= h.attacher.UnplugDisk(ctx, vd, kvvm) - if err != nil { - return reconcile.Result{}, err - } - } - } - vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseTerminating cb.Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown) @@ -138,12 +149,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi return reconcile.Result{}, nil } - if vd == nil { + if ad == nil { vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending cb. Status(metav1.ConditionFalse). Reason(vmbdacondition.NotAttached). - Message(fmt.Sprintf("VirtualDisk %q not found.", vmbda.Spec.BlockDeviceRef.Name)) + Message(fmt.Sprintf("AttachmentDisk %q not found.", vmbda.Spec.BlockDeviceRef.Name)) return reconcile.Result{}, nil } @@ -179,10 +190,10 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi return reconcile.Result{}, nil } - log = log.With("vmName", vm.Name, "vdName", vd.Name) + log = log.With("vmName", vm.Name, "attachmentDiskName", ad.Name) log.Info("Check if hot plug is completed and disk is attached") - isHotPlugged, err := h.attacher.IsHotPlugged(vd, vm, kvvmi) + isHotPlugged, err := h.attacher.IsHotPlugged(ad, vm, kvvmi) if err != nil { if errors.Is(err, service.ErrVolumeStatusNotReady) { vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseInProgress @@ -207,14 +218,14 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi return reconcile.Result{}, nil } - _, err = h.attacher.CanHotPlug(vd, vm, kvvm) + _, err = h.attacher.CanHotPlug(ad, vm, kvvm) blockDeviceLimitCondition, _ := conditions.GetCondition(vmbdacondition.DiskAttachmentCapacityAvailableType, vmbda.Status.Conditions) switch { case err == nil && blockDeviceLimitCondition.Status == metav1.ConditionTrue: log.Info("Send attachment request") - err = h.attacher.HotPlugDisk(ctx, vd, vm, kvvm) + err = h.attacher.HotPlugDisk(ctx, ad, vm, kvvm) if err != nil { return reconcile.Result{}, err } @@ -225,7 +236,7 @@ func (h 
LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi Reason(vmbdacondition.AttachmentRequestSent). Message("Attachment request has sent: attachment is in progress.") return reconcile.Result{}, nil - case errors.Is(err, service.ErrDiskIsSpecAttached): + case errors.Is(err, service.ErrBlockDeviceIsSpecAttached): log.Info("VirtualDisk is already attached to the virtual machine spec") vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseFailed @@ -243,15 +254,6 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi Reason(vmbdacondition.AttachmentRequestSent). Message("Attachment request sent: attachment is in progress.") return reconcile.Result{}, nil - case errors.Is(err, service.ErrVirtualMachineWaitsForRestartApproval): - log.Info("Virtual machine waits for restart approval") - - vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending - cb. - Status(metav1.ConditionFalse). - Reason(vmbdacondition.NotAttached). - Message(service.CapitalizeFirstLetter(err.Error())) - return reconcile.Result{}, nil case blockDeviceLimitCondition.Status != metav1.ConditionTrue: log.Info("Virtual machine block device capacity reached") diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go new file mode 100644 index 0000000000..aec0a34042 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go @@ -0,0 +1,106 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watcher + +import ( + "context" + "fmt" + "log/slog" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" +) + +type ClusterVirtualImageWatcher struct { + client client.Client +} + +func NewClusterVirtualImageWatcher(client client.Client) *ClusterVirtualImageWatcher { + return &ClusterVirtualImageWatcher{ + client: client, + } +} + +func (w ClusterVirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { + return ctr.Watch( + source.Kind(mgr.GetCache(), &virtv2.ClusterVirtualImage{}), + handler.EnqueueRequestsFromMapFunc(w.enqueueRequests), + predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { return false }, + DeleteFunc: func(e event.DeleteEvent) bool { return true }, + UpdateFunc: w.filterUpdateEvents, + }, + ) +} + +func (w ClusterVirtualImageWatcher) enqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) { + var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList + err := w.client.List(ctx, &vmbdas) + if err != nil { 
+ slog.Default().Error(fmt.Sprintf("failed to list vmbdas: %s", err)) + return + } + + for _, vmbda := range vmbdas.Items { + if vmbda.Spec.BlockDeviceRef.Kind != virtv2.VMBDAObjectRefKindClusterVirtualImage || vmbda.Spec.BlockDeviceRef.Name != obj.GetName() { + continue + } + + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: vmbda.Name, + Namespace: vmbda.Namespace, + }, + }) + } + + return +} + +func (w ClusterVirtualImageWatcher) filterUpdateEvents(e event.UpdateEvent) bool { + oldCVI, ok := e.ObjectOld.(*virtv2.ClusterVirtualImage) + if !ok { + slog.Default().Error(fmt.Sprintf("expected an old ClusterVirtualImage but got a %T", e.ObjectOld)) + return false + } + + newCVI, ok := e.ObjectNew.(*virtv2.ClusterVirtualImage) + if !ok { + slog.Default().Error(fmt.Sprintf("expected a new ClusterVirtualImage but got a %T", e.ObjectNew)) + return false + } + + if oldCVI.Status.Phase != newCVI.Status.Phase { + return true + } + + oldReadyCondition, _ := conditions.GetCondition(cvicondition.ReadyType, oldCVI.Status.Conditions) + newReadyCondition, _ := conditions.GetCondition(cvicondition.ReadyType, newCVI.Status.Conditions) + + return oldReadyCondition.Status != newReadyCondition.Status +} diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go new file mode 100644 index 0000000000..4bb3c8b693 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go @@ -0,0 +1,108 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watcher + +import ( + "context" + "fmt" + "log/slog" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" +) + +type VirtualImageWatcher struct { + client client.Client +} + +func NewVirtualImageWatcherr(client client.Client) *VirtualImageWatcher { + return &VirtualImageWatcher{ + client: client, + } +} + +func (w VirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { + return ctr.Watch( + source.Kind(mgr.GetCache(), &virtv2.VirtualImage{}), + handler.EnqueueRequestsFromMapFunc(w.enqueueRequests), + predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { return false }, + DeleteFunc: func(e event.DeleteEvent) bool { return true }, + UpdateFunc: w.filterUpdateEvents, + }, + ) +} + +func (w VirtualImageWatcher) enqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) { + var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList + err := w.client.List(ctx, &vmbdas, &client.ListOptions{ + Namespace: obj.GetNamespace(), + }) + if 
err != nil { + slog.Default().Error(fmt.Sprintf("failed to list vmbdas: %s", err)) + return + } + + for _, vmbda := range vmbdas.Items { + if vmbda.Spec.BlockDeviceRef.Kind != virtv2.VMBDAObjectRefKindVirtualImage || vmbda.Spec.BlockDeviceRef.Name != obj.GetName() { + continue + } + + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: vmbda.Name, + Namespace: vmbda.Namespace, + }, + }) + } + + return +} + +func (w VirtualImageWatcher) filterUpdateEvents(e event.UpdateEvent) bool { + oldVI, ok := e.ObjectOld.(*virtv2.VirtualImage) + if !ok { + slog.Default().Error(fmt.Sprintf("expected an old VirtualImage but got a %T", e.ObjectOld)) + return false + } + + newVI, ok := e.ObjectNew.(*virtv2.VirtualImage) + if !ok { + slog.Default().Error(fmt.Sprintf("expected a new VirtualImage but got a %T", e.ObjectNew)) + return false + } + + if oldVI.Status.Phase != newVI.Status.Phase { + return true + } + + oldReadyCondition, _ := conditions.GetCondition(vicondition.ReadyType, oldVI.Status.Conditions) + newReadyCondition, _ := conditions.GetCondition(vicondition.ReadyType, newVI.Status.Conditions) + + return oldReadyCondition.Status != newReadyCondition.Status +} diff --git a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go index 1f5ff0cfa5..9ef2b135b9 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go @@ -31,6 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmbda/internal" "github.com/deckhouse/virtualization-controller/pkg/logger" vmbdametrics "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vmbda" + "github.com/deckhouse/virtualization/api/client/kubeclient" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" ) @@ -39,10 +40,11 @@ const ControllerName = 
"vmbda-controller" func NewController( ctx context.Context, mgr manager.Manager, + virtClient kubeclient.Client, lg *log.Logger, ns string, ) (controller.Controller, error) { - attacher := service.NewAttachmentService(mgr.GetClient(), ns) + attacher := service.NewAttachmentService(mgr.GetClient(), virtClient, ns) blockDeviceService := service.NewBlockDeviceService(mgr.GetClient()) reconciler := NewReconciler( @@ -51,7 +53,7 @@ func NewController( internal.NewBlockDeviceReadyHandler(attacher), internal.NewVirtualMachineReadyHandler(attacher), internal.NewLifeCycleHandler(attacher), - internal.NewDeletionHandler(), + internal.NewDeletionHandler(attacher, mgr.GetClient()), ) vmbdaController, err := controller.New(ControllerName, mgr, controller.Options{ diff --git a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go index fc4e78c94f..d0a63d9231 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go @@ -101,6 +101,8 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr watcher.NewVirtualMachineBlockDeviceAttachmentWatcher(mgr.GetClient()), watcher.NewVirtualMachineWatcher(mgr.GetClient()), watcher.NewVirtualDiskWatcher(mgr.GetClient()), + watcher.NewClusterVirtualImageWatcher(mgr.GetClient()), + watcher.NewVirtualImageWatcherr(mgr.GetClient()), watcher.NewKVVMIWatcher(mgr.GetClient()), } { err := w.Watch(mgr, ctr) diff --git a/templates/virtualization-controller/rbac-for-us.yaml b/templates/virtualization-controller/rbac-for-us.yaml index c873f497ff..6525433562 100644 --- a/templates/virtualization-controller/rbac-for-us.yaml +++ b/templates/virtualization-controller/rbac-for-us.yaml @@ -158,6 +158,8 @@ rules: - virtualmachines/freeze - virtualmachines/unfreeze - virtualmachines/migrate + - virtualmachines/addvolume + - 
virtualmachines/removevolume verbs: - update - apiGroups: