diff --git a/pkg/operator/ceph/csi/cluster_config.go b/pkg/operator/ceph/csi/cluster_config.go index 97928b9bf1ee..2f0d41c70c2c 100644 --- a/pkg/operator/ceph/csi/cluster_config.go +++ b/pkg/operator/ceph/csi/cluster_config.go @@ -127,9 +127,10 @@ func MonEndpoints(mons map[string]*cephclient.MonInfo, requireMsgr2 bool) []stri // the cluster-to-mon mapping required to configure ceph csi. func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *CsiClusterConfigEntry) (string, error) { var ( - cc csiClusterConfig - centry CsiClusterConfigEntry - found bool + cc csiClusterConfig + centry CsiClusterConfigEntry + found bool + kernelMountOptions, fuseMountOptions string ) cc, err := parseCsiClusterConfig(curr) @@ -137,6 +138,12 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C return "", errors.Wrap(err, "failed to parse current csi cluster config") } + // set mount options here to avoid doing nil checks multiple times. + if newCsiClusterConfigEntry != nil && newCsiClusterConfigEntry.CephFS != nil { + kernelMountOptions = newCsiClusterConfigEntry.CephFS.KernelMountOptions + fuseMountOptions = newCsiClusterConfigEntry.CephFS.FuseMountOptions + } + // Regardless of which controllers call updateCsiClusterConfig(), the values will be preserved since // a lock is acquired for the update operation. So concurrent updates (rare event) will block and // wait for the other update to complete. 
Monitors and Subvolumegroup will be updated @@ -147,6 +154,12 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C // update default clusterID's entry if clusterKey == centry.Namespace { centry.Monitors = newCsiClusterConfigEntry.Monitors + centry.ReadAffinity = newCsiClusterConfigEntry.ReadAffinity + if centry.CephFS == nil { + centry.CephFS = &CsiCephFSSpec{} + } + centry.CephFS.KernelMountOptions = kernelMountOptions + centry.CephFS.FuseMountOptions = fuseMountOptions cc[i] = centry } } @@ -160,15 +173,28 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C break } centry.Monitors = newCsiClusterConfigEntry.Monitors + // update subvolumegroup and cephfs netNamespaceFilePath only when either is specified + // while always updating kernel and fuse mount options. if newCsiClusterConfigEntry.CephFS != nil && (newCsiClusterConfigEntry.CephFS.SubvolumeGroup != "" || newCsiClusterConfigEntry.CephFS.NetNamespaceFilePath != "") { centry.CephFS = newCsiClusterConfigEntry.CephFS + } else { + if centry.CephFS == nil { + centry.CephFS = &CsiCephFSSpec{} + } + centry.CephFS.KernelMountOptions = kernelMountOptions + centry.CephFS.FuseMountOptions = fuseMountOptions } + // update nfs netNamespaceFilePath only when specified. if newCsiClusterConfigEntry.NFS != nil && newCsiClusterConfigEntry.NFS.NetNamespaceFilePath != "" { centry.NFS = newCsiClusterConfigEntry.NFS } + // update radosNamespace and rbd netNamespaceFilePath only when either is specified. 
if newCsiClusterConfigEntry.RBD != nil && (newCsiClusterConfigEntry.RBD.RadosNamespace != "" || newCsiClusterConfigEntry.RBD.NetNamespaceFilePath != "") { centry.RBD = newCsiClusterConfigEntry.RBD } + if newCsiClusterConfigEntry.ReadAffinity != nil && len(newCsiClusterConfigEntry.ReadAffinity.CrushLocationLabels) != 0 { + centry.ReadAffinity = newCsiClusterConfigEntry.ReadAffinity + } // This maintains backward compatibility for existing clusters, from now on the // preferred way is to use RBD.RadosNamespace if newCsiClusterConfigEntry.RadosNamespace != "" { @@ -186,16 +212,20 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C centry.ClusterID = clusterKey centry.Namespace = newCsiClusterConfigEntry.Namespace centry.Monitors = newCsiClusterConfigEntry.Monitors - if newCsiClusterConfigEntry.RBD != nil && (newCsiClusterConfigEntry.RBD.RadosNamespace != "" || newCsiClusterConfigEntry.CephFS.NetNamespaceFilePath != "") { + if newCsiClusterConfigEntry.RBD != nil && (newCsiClusterConfigEntry.RBD.RadosNamespace != "" || newCsiClusterConfigEntry.RBD.NetNamespaceFilePath != "") { centry.RBD = newCsiClusterConfigEntry.RBD } // Add a condition not to fill with empty values - if newCsiClusterConfigEntry.CephFS != nil && (newCsiClusterConfigEntry.CephFS.SubvolumeGroup != "" || newCsiClusterConfigEntry.CephFS.NetNamespaceFilePath != "") { + if newCsiClusterConfigEntry.CephFS != nil && (newCsiClusterConfigEntry.CephFS.SubvolumeGroup != "" || newCsiClusterConfigEntry.CephFS.NetNamespaceFilePath != "" || + newCsiClusterConfigEntry.CephFS.KernelMountOptions != "" || newCsiClusterConfigEntry.CephFS.FuseMountOptions != "") { centry.CephFS = newCsiClusterConfigEntry.CephFS } if newCsiClusterConfigEntry.NFS != nil && newCsiClusterConfigEntry.NFS.NetNamespaceFilePath != "" { centry.NFS = newCsiClusterConfigEntry.NFS } + if newCsiClusterConfigEntry.ReadAffinity != nil && len(newCsiClusterConfigEntry.ReadAffinity.CrushLocationLabels) != 0 { + 
centry.ReadAffinity = newCsiClusterConfigEntry.ReadAffinity + } cc = append(cc, centry) } } @@ -248,6 +278,19 @@ func SaveClusterConfig(clientset kubernetes.Interface, clusterNamespace string, } logger.Debugf("using %q for csi configmap namespace", csiNamespace) + if newCsiClusterConfigEntry != nil { + // set CSIDriverOptions + newCsiClusterConfigEntry.ReadAffinity = &cephv1.ReadAffinitySpec{ + Enabled: clusterInfo.CSIDriverSpec.ReadAffinity.Enabled, + CrushLocationLabels: clusterInfo.CSIDriverSpec.ReadAffinity.CrushLocationLabels, + } + if newCsiClusterConfigEntry.CephFS == nil { + newCsiClusterConfigEntry.CephFS = &CsiCephFSSpec{} + } + newCsiClusterConfigEntry.CephFS.KernelMountOptions = clusterInfo.CSIDriverSpec.CephFS.KernelMountOptions + newCsiClusterConfigEntry.CephFS.FuseMountOptions = clusterInfo.CSIDriverSpec.CephFS.FuseMountOptions + } + configMutex.Lock() defer configMutex.Unlock() diff --git a/pkg/operator/ceph/csi/cluster_config_test.go b/pkg/operator/ceph/csi/cluster_config_test.go index bc406f8dff18..3c307b7d3dda 100644 --- a/pkg/operator/ceph/csi/cluster_config_test.go +++ b/pkg/operator/ceph/csi/cluster_config_test.go @@ -38,6 +38,14 @@ func TestUpdateCsiClusterConfig(t *testing.T) { RadosNamespace: "rook-ceph-1", }, } + csiClusterConfigEntryMountOptions := CsiClusterConfigEntry{ + Namespace: "rook-ceph-1", + Monitors: []string{"1.2.3.4:5000"}, + CephFS: &CsiCephFSSpec{ + KernelMountOptions: "ms_mode=crc", + FuseMountOptions: "debug", + }, + } csiClusterConfigEntry2 := CsiClusterConfigEntry{ Namespace: "rook-ceph-2", Monitors: []string{"20.1.1.1:5000", "20.1.1.2:5000", "20.1.1.3:5000"}, @@ -72,6 +80,28 @@ func TestUpdateCsiClusterConfig(t *testing.T) { assert.Equal(t, 2, len(cc[0].Monitors)) }) + t.Run("add mount options to the current cluster", func(t *testing.T) { + configWithMountOptions, err := updateCsiClusterConfig(s, "rook-ceph-1", &csiClusterConfigEntryMountOptions) + assert.NoError(t, err) + cc, err := 
parseCsiClusterConfig(configWithMountOptions) + assert.NoError(t, err) + assert.Equal(t, 1, len(cc)) + assert.Equal(t, "rook-ceph-1", cc[0].ClusterID) + assert.Equal(t, csiClusterConfigEntryMountOptions.CephFS.KernelMountOptions, cc[0].CephFS.KernelMountOptions) + assert.Equal(t, csiClusterConfigEntryMountOptions.CephFS.FuseMountOptions, cc[0].CephFS.FuseMountOptions) + }) + + t.Run("re-apply mount options to the current cluster (idempotent)", func(t *testing.T) { + configWithMountOptions, err := updateCsiClusterConfig(s, "rook-ceph-1", &csiClusterConfigEntryMountOptions) + assert.NoError(t, err) + cc, err := parseCsiClusterConfig(configWithMountOptions) + assert.NoError(t, err) + assert.Equal(t, 1, len(cc)) + assert.Equal(t, "rook-ceph-1", cc[0].ClusterID) + assert.Equal(t, csiClusterConfigEntryMountOptions.CephFS.KernelMountOptions, cc[0].CephFS.KernelMountOptions) + assert.Equal(t, csiClusterConfigEntryMountOptions.CephFS.FuseMountOptions, cc[0].CephFS.FuseMountOptions) + }) + t.Run("add a 2nd cluster with 3 mons", func(t *testing.T) { s, err = updateCsiClusterConfig(s, "beta", &csiClusterConfigEntry2) assert.NoError(t, err) @@ -128,6 +158,32 @@ func TestUpdateCsiClusterConfig(t *testing.T) { }) + t.Run("update mount options in presence of subvolumegroup", func(t *testing.T) { + sMntOptionUpdate, err := updateCsiClusterConfig(s, "baba", &csiClusterConfigEntryMountOptions) + assert.NoError(t, err) + cc, err := parseCsiClusterConfig(sMntOptionUpdate) + assert.NoError(t, err) + assert.Equal(t, 3, len(cc)) + assert.Equal(t, "baba", cc[2].ClusterID) + assert.Equal(t, "my-group", cc[2].CephFS.SubvolumeGroup) + assert.Equal(t, csiClusterConfigEntryMountOptions.CephFS.KernelMountOptions, cc[2].CephFS.KernelMountOptions) + assert.Equal(t, csiClusterConfigEntryMountOptions.CephFS.FuseMountOptions, cc[2].CephFS.FuseMountOptions) + + }) + + t.Run("re-apply mount options in presence of subvolumegroup (idempotent)", func(t *testing.T) { + sMntOptionUpdate, err := updateCsiClusterConfig(s, "baba", 
&csiClusterConfigEntryMountOptions) + assert.NoError(t, err) + cc, err := parseCsiClusterConfig(sMntOptionUpdate) + assert.NoError(t, err) + assert.Equal(t, 3, len(cc)) + assert.Equal(t, "baba", cc[2].ClusterID) + assert.Equal(t, "my-group", cc[2].CephFS.SubvolumeGroup) + assert.Equal(t, csiClusterConfigEntryMountOptions.CephFS.KernelMountOptions, cc[2].CephFS.KernelMountOptions) + assert.Equal(t, csiClusterConfigEntryMountOptions.CephFS.FuseMountOptions, cc[2].CephFS.FuseMountOptions) + + }) + t.Run("add a 4th mon to the 3rd cluster and subvolumegroup is preserved", func(t *testing.T) { csiClusterConfigEntry3.Monitors = append(csiClusterConfigEntry3.Monitors, "10.11.12.13:5000") s, err = updateCsiClusterConfig(s, "baba", &csiClusterConfigEntry3)