Skip to content

Commit

Permalink
Add "auto" provisioning type.
Browse files Browse the repository at this point in the history
Automatically use hostpath if pod is on same host as zfs pool.

This addresses #85. When storage class type is set to auto,
automatically create a hostpath volume when the scheduler selects the
specified node to run the pod, otherwise fallback to using NFS.

Note this only works when volumeBindingMode is set to
WaitForFirstConsumer in the storage class. Otherwise, when set to
Immediate, volumes will be pre-provisioned before the scheduler selects a
node for the pod consuming the volume, and options.SelectedNode will be
unset.
  • Loading branch information
jp39 committed Aug 12, 2024
1 parent 829d4a5 commit 52c1040
Show file tree
Hide file tree
Showing 8 changed files with 134 additions and 64 deletions.
26 changes: 26 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,12 @@ but the `PersistentVolume` objects will have a [NodeAffinity][node affinity] con

![architecture with Hostpath](architecture.hostpath.drawio.svg "Architecture with Hostpath provisioning")

As a third option, if the ZFS host is part of the cluster, you can let the provisioner choose
whether [NFS][nfs] or [HostPath][hostpath] is used with the `Auto` mode. If the scheduler
decides to place a Pod onto the ZFS host, *and* the requested access mode in the Persistent Volume
Claim is `ReadWriteOnce` (meaning the volume can only be accessed by pods running on the same node),
then [HostPath][hostpath] will automatically be used; otherwise [NFS][nfs] will be used.

Currently all ZFS attributes are inherited from the parent dataset.

For more information about external storage in kubernetes, see
Expand Down Expand Up @@ -85,6 +91,26 @@ parameters:
```
For NFS, you can also specify other options, as described in [exports(5)][man exports].
The following example configures a storage class using the `Auto` type. The provisioner
will decide whether [HostPath][hostpath] or [NFS][nfs] will be used based on where the
pods are being scheduled.

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: zfs-auto
provisioner: pv.kubernetes.io/zfs
reclaimPolicy: Retain
parameters:
parentDataset: tank/kubernetes
hostname: storage-1.domain.tld
type: auto
node: storage-1 # the name of the node where the ZFS datasets are located.
shareProperties: rw,no_root_squash
reserveSpace: true
```

## Notes

### Reclaim policy
Expand Down
3 changes: 3 additions & 0 deletions charts/kubernetes-zfs-provisioner/templates/storageclass.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,9 @@ metadata:
{{ toYaml . | nindent 4 }}
{{- end }}
provisioner: {{ $.Values.provisioner.instance }}
{{- if eq .type "auto" }}
volumeBindingMode: WaitForFirstConsumer
{{- end }}
reclaimPolicy: {{ .policy | default "Delete" }}
parameters:
parentDataset: {{ .parentDataset }}
Expand Down
2 changes: 1 addition & 1 deletion charts/kubernetes-zfs-provisioner/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ storageClass:
# policy: "Delete"
# # -- NFS export properties (see `exports(5)`)
# shareProperties: ""
# # -- Provision type, one of [`nfs`, `hostpath`]
# # -- Provision type, one of [`nfs`, `hostpath`, `auto`]
# type: "nfs"
# # -- Override `kubernetes.io/hostname` from `hostName` parameter for
# # `HostPath` node affinity
Expand Down
51 changes: 32 additions & 19 deletions pkg/provisioner/parameters.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,31 +19,34 @@ const (
parameters:
parentDataset: tank/volumes
hostname: my-zfs-host.localdomain
type: nfs|hostpath
type: nfs|hostPath|auto
shareProperties: rw=10.0.0.0/8,no_root_squash
node: my-zfs-host
reserveSpace: true|false
*/

// ProvisioningType selects how a PersistentVolume is exposed to pods:
// always over NFS, always as a node-local HostPath, or decided automatically
// at provisioning time.
type ProvisioningType string

const (
// Nfs always provisions NFS-backed volumes.
Nfs ProvisioningType = "nfs"
// HostPath always provisions node-local HostPath volumes.
HostPath ProvisioningType = "hostPath"
// Auto picks HostPath when the pod is scheduled onto the ZFS host and the
// claim's access mode permits it, and falls back to NFS otherwise.
Auto ProvisioningType = "auto"
)

type (
	// ZFSStorageClassParameters represents the parameters on the `StorageClass`
	// object. It is used to ease access and validate those parameters at run time.
	ZFSStorageClassParameters struct {
		// ParentDataset of the zpool. Needs to be existing on the target ZFS host.
		ParentDataset string
		// Hostname of the target ZFS host. Will be used to connect over SSH.
		Hostname string
		// Type selects NFS, HostPath or automatic provisioning.
		Type ProvisioningType
		// NFSShareProperties specifies additional properties to pass to 'zfs create sharenfs=%s'.
		NFSShareProperties string
		// HostPathNodeName overrides the hostname if the Kubernetes node name is different than the ZFS target host. Used for Affinity.
		HostPathNodeName string
		// ReserveSpace indicates whether space should be reserved for the volume on the parent dataset.
		ReserveSpace bool
	}
)

Expand Down Expand Up @@ -79,16 +82,26 @@ func NewStorageClassParameters(parameters map[string]string) (*ZFSStorageClassPa
typeParam := parameters[TypeParameter]
switch typeParam {
case "hostpath", "hostPath", "HostPath", "Hostpath", "HOSTPATH":
p.HostPath = &HostPathParameters{NodeName: parameters[NodeNameParameter]}
return p, nil
p.Type = HostPath
case "nfs", "Nfs", "NFS":
p.Type = Nfs
case "auto", "Auto", "AUTO":
p.Type = Auto
default:
return nil, fmt.Errorf("invalid '%s' parameter value: %s", TypeParameter, typeParam)
}

if p.Type == HostPath || p.Type == Auto {
p.HostPathNodeName = parameters[NodeNameParameter]
}

if p.Type == Nfs || p.Type == Auto {
shareProps := parameters[SharePropertiesParameter]
if shareProps == "" {
shareProps = "on"
}
p.NFS = &NFSParameters{ShareProperties: shareProps}
return p, nil
default:
return nil, fmt.Errorf("invalid '%s' parameter value: %s", TypeParameter, typeParam)
p.NFSShareProperties = shareProps
}

return p, nil
}
10 changes: 5 additions & 5 deletions pkg/provisioner/parameters_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func TestNewStorageClassParameters(t *testing.T) {
SharePropertiesParameter: "rw",
},
},
want: &ZFSStorageClassParameters{NFS: &NFSParameters{ShareProperties: "rw"}},
want: &ZFSStorageClassParameters{NFSShareProperties: "rw"},
},
{
name: "GivenCorrectSpec_WhenTypeNfsWithoutProperties_ThenReturnNfsParametersWithDefault",
Expand All @@ -88,7 +88,7 @@ func TestNewStorageClassParameters(t *testing.T) {
TypeParameter: "nfs",
},
},
want: &ZFSStorageClassParameters{NFS: &NFSParameters{ShareProperties: "on"}},
want: &ZFSStorageClassParameters{NFSShareProperties: "on"},
},
{
name: "GivenCorrectSpec_WhenTypeHostPath_ThenReturnHostPathParameters",
Expand All @@ -100,7 +100,7 @@ func TestNewStorageClassParameters(t *testing.T) {
NodeNameParameter: "my-node",
},
},
want: &ZFSStorageClassParameters{HostPath: &HostPathParameters{NodeName: "my-node"}},
want: &ZFSStorageClassParameters{HostPathNodeName: "my-node"},
},
}
for _, tt := range tests {
Expand All @@ -112,8 +112,8 @@ func TestNewStorageClassParameters(t *testing.T) {
return
}
assert.NoError(t, err)
assert.Equal(t, tt.want.NFS, result.NFS)
assert.Equal(t, tt.want.HostPath, result.HostPath)
assert.Equal(t, tt.want.NFSShareProperties, result.NFSShareProperties)
assert.Equal(t, tt.want.HostPathNodeName, result.HostPathNodeName)
})
}
}
93 changes: 58 additions & 35 deletions pkg/provisioner/provision.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,9 @@ package provisioner
import (
"context"
"fmt"
"slices"
"strconv"

"k8s.io/klog/v2"

"github.com/ccremer/kubernetes-zfs-provisioner/pkg/zfs"

v1 "k8s.io/api/core/v1"
Expand All @@ -24,8 +23,9 @@ func (p *ZFSProvisioner) Provision(ctx context.Context, options controller.Provi
datasetPath := fmt.Sprintf("%s/%s", parameters.ParentDataset, options.PVName)
properties := make(map[string]string)

if parameters.NFS != nil {
properties["sharenfs"] = parameters.NFS.ShareProperties
useHostPath := canUseHostPath(parameters, options)
if !useHostPath {
properties[ShareNfsProperty] = parameters.NFSShareProperties
}

var reclaimPolicy v1.PersistentVolumeReclaimPolicy
Expand Down Expand Up @@ -73,28 +73,44 @@ func (p *ZFSProvisioner) Provision(ctx context.Context, options controller.Provi
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: reclaimPolicy,
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadOnlyMany, v1.ReadWriteOnce},
AccessModes: createAccessModes(useHostPath),
Capacity: v1.ResourceList{
v1.ResourceStorage: options.PVC.Spec.Resources.Requests[v1.ResourceStorage],
},
PersistentVolumeSource: createVolumeSource(parameters, dataset),
NodeAffinity: createNodeAffinity(parameters),
PersistentVolumeSource: createVolumeSource(parameters, dataset, useHostPath),
NodeAffinity: createNodeAffinity(parameters, useHostPath),
},
}
return pv, controller.ProvisioningFinished, nil
}

func createVolumeSource(parameters *ZFSStorageClassParameters, dataset *zfs.Dataset) v1.PersistentVolumeSource {
if parameters.NFS != nil {
return v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: parameters.Hostname,
Path: dataset.Mountpoint,
ReadOnly: false,
},
func canUseHostPath(parameters *ZFSStorageClassParameters, options controller.ProvisionOptions) bool {
switch parameters.Type {
case Nfs:
return false
case HostPath:
return true
case Auto:
if options.SelectedNode == nil || parameters.HostPathNodeName != options.SelectedNode.Name {
return false
}
if slices.Contains(options.PVC.Spec.AccessModes, v1.ReadOnlyMany) || slices.Contains(options.PVC.Spec.AccessModes, v1.ReadWriteMany) {
return false
}
}
if parameters.HostPath != nil {
return true
}

// createAccessModes returns the access modes the PersistentVolume supports.
// A HostPath volume is bound to a single node and therefore only offers
// ReadWriteOnce; an NFS volume can additionally be mounted from other nodes
// read-only or read-write.
func createAccessModes(useHostPath bool) []v1.PersistentVolumeAccessMode {
	if useHostPath {
		return []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
	}
	return []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany}
}

func createVolumeSource(parameters *ZFSStorageClassParameters, dataset *zfs.Dataset, useHostPath bool) v1.PersistentVolumeSource {
if useHostPath {
hostPathType := v1.HostPathDirectory
return v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Expand All @@ -103,27 +119,34 @@ func createVolumeSource(parameters *ZFSStorageClassParameters, dataset *zfs.Data
},
}
}
klog.Exitf("Programmer error: Missing implementation for volume source: %v", parameters)
return v1.PersistentVolumeSource{}

return v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: parameters.Hostname,
Path: dataset.Mountpoint,
ReadOnly: false,
},
}
}

func createNodeAffinity(parameters *ZFSStorageClassParameters) *v1.VolumeNodeAffinity {
if parameters.HostPath != nil {
node := parameters.HostPath.NodeName
if node == "" {
node = parameters.Hostname
}
return &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Values: []string{node},
Operator: v1.NodeSelectorOpIn,
Key: v1.LabelHostname,
},
func createNodeAffinity(parameters *ZFSStorageClassParameters, useHostPath bool) *v1.VolumeNodeAffinity {
if !useHostPath {
return nil
}

node := parameters.HostPathNodeName
if node == "" {
node = parameters.Hostname
}
return &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Values: []string{node},
Operator: v1.NodeSelectorOpIn,
Key: v1.LabelHostname,
},
},
}}}
}
return nil
},
}}}
}
12 changes: 8 additions & 4 deletions pkg/provisioner/provision_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,10 @@ func TestProvisionNfs(t *testing.T) {
pv, _, err := p.Provision(context.Background(), options)
require.NoError(t, err)
assertBasics(t, stub, pv, expectedDatasetName, expectedHost)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteOnce)
// Pods located on other nodes can mount this PV
assert.Contains(t, pv.Spec.AccessModes, v1.ReadOnlyMany)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteMany)

assert.Equal(t, v1.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)

Expand All @@ -66,10 +70,6 @@ func TestProvisionNfs(t *testing.T) {
func assertBasics(t *testing.T, stub *zfsStub, pv *v1.PersistentVolume, expectedDataset string, expectedHost string) {
stub.AssertExpectations(t)

assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteOnce)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadOnlyMany)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteMany)

assert.Contains(t, pv.Annotations, "my/annotation")
assert.Equal(t, expectedDataset, pv.Annotations[DatasetPathAnnotation])
assert.Equal(t, expectedHost, pv.Annotations[ZFSHostAnnotation])
Expand Down Expand Up @@ -111,6 +111,10 @@ func TestProvisionHostPath(t *testing.T) {
pv, _, err := p.Provision(context.Background(), options)
require.NoError(t, err)
assertBasics(t, stub, pv, expectedDatasetName, expectedHost)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteOnce)
// Pods located on other nodes cannot mount this PV
assert.NotContains(t, pv.Spec.AccessModes, v1.ReadOnlyMany)
assert.NotContains(t, pv.Spec.AccessModes, v1.ReadWriteMany)

assert.Equal(t, policy, pv.Spec.PersistentVolumeReclaimPolicy)

Expand Down
1 change: 1 addition & 0 deletions pkg/provisioner/provisioner.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ const (

RefQuotaProperty = "refquota"
RefReservationProperty = "refreservation"
ShareNfsProperty = "sharenfs"
ManagedByProperty = "io.kubernetes.pv.zfs:managed_by"
ReclaimPolicyProperty = "io.kubernetes.pv.zfs:reclaim_policy"
)
Expand Down

0 comments on commit 52c1040

Please sign in to comment.