From b665d7a7b7b65aa9dde62f79e77445926c7064c4 Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Wed, 2 Oct 2024 14:11:55 -0600
Subject: [PATCH] core: remove support for ceph quincy
Given that Ceph Quincy (v17) is past end of life,
remove Quincy from the supported Ceph versions,
examples, and documentation.
Supported versions now include only Reef and Squid.
Signed-off-by: Travis Nielsen
---
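A minimal sketch of the cephVersion settings that remain valid after this change, taken from the deploy/examples/cluster.yaml hunk below; the pinned image tag is only an example:

```yaml
cephVersion:
  # Use a specific Reef (v18) or Squid (v19) release tag in production.
  image: quay.io/ceph/ceph:v18.2.4
  # Only needed for versions beyond Squid, e.g. Tentacle (v20); keep false in production.
  allowUnsupported: false
```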
.github/workflows/daily-nightly-jobs.yml | 122 +-----------------
.../CRDs/Cluster/ceph-cluster-crd.md | 14 +-
.../external-cluster/external-cluster.md | 2 +-
.../external-cluster/provider-export.md | 2 +-
.../Object-Storage/ceph-object-store-crd.md | 4 +-
Documentation/CRDs/ceph-nfs-crd.md | 12 --
Documentation/CRDs/specification.md | 7 +-
.../Storage-Configuration/NFS/nfs-security.md | 5 -
.../Storage-Configuration/NFS/nfs.md | 6 +-
.../ceph-object-bucket-notifications.md | 2 +-
.../Object-Storage-RGW/object-storage.md | 4 +-
Documentation/Upgrade/ceph-upgrade.md | 12 +-
PendingReleaseNotes.md | 1 +
deploy/charts/rook-ceph-cluster/values.yaml | 8 +-
.../charts/rook-ceph/templates/resources.yaml | 5 +-
deploy/examples/cluster.yaml | 10 +-
deploy/examples/crds.yaml | 5 +-
design/ceph/ceph-nfs-ganesha.md | 4 +-
design/ceph/object/ceph-sse-s3.md | 2 -
pkg/apis/ceph.rook.io/v1/types.go | 5 +-
pkg/daemon/ceph/client/config_test.go | 7 +-
pkg/daemon/ceph/client/upgrade.go | 4 +-
pkg/daemon/ceph/client/upgrade_test.go | 4 +-
pkg/daemon/ceph/osd/daemon.go | 8 --
pkg/daemon/ceph/osd/daemon_test.go | 17 +--
pkg/operator/ceph/cluster/cephstatus_test.go | 2 -
.../ceph/cluster/mgr/dashboard_test.go | 2 +-
.../ceph/cluster/mgr/orchestrator_test.go | 2 +-
pkg/operator/ceph/cluster/mon/mon_test.go | 12 +-
pkg/operator/ceph/cluster/mon/node_test.go | 12 +-
pkg/operator/ceph/cluster/osd/create_test.go | 4 +-
.../ceph/cluster/osd/integration_test.go | 2 +-
pkg/operator/ceph/cluster/osd/osd_test.go | 10 +-
pkg/operator/ceph/cluster/osd/spec.go | 29 +----
pkg/operator/ceph/cluster/osd/spec_test.go | 10 +-
pkg/operator/ceph/cluster/osd/status_test.go | 2 +-
pkg/operator/ceph/cluster/osd/update_test.go | 4 +-
.../ceph/cluster/rbd/controller_test.go | 2 +-
pkg/operator/ceph/cluster/rbd/spec_test.go | 2 +-
pkg/operator/ceph/cluster/version_test.go | 40 +++---
pkg/operator/ceph/config/defaults.go | 5 -
pkg/operator/ceph/config/monstore.go | 2 +-
.../ceph/controller/predicate_test.go | 8 +-
.../disruption/clusterdisruption/osd_test.go | 2 +-
pkg/operator/ceph/file/controller_test.go | 2 +-
pkg/operator/ceph/file/filesystem_test.go | 117 ++++++++++-------
pkg/operator/ceph/file/mds/spec_test.go | 2 +-
.../ceph/file/mirror/controller_test.go | 2 +-
pkg/operator/ceph/file/mirror/spec_test.go | 2 +-
pkg/operator/ceph/nfs/controller_test.go | 2 +-
pkg/operator/ceph/nfs/nfs_test.go | 4 +-
pkg/operator/ceph/nfs/security_test.go | 8 +-
pkg/operator/ceph/nfs/spec_test.go | 4 +-
pkg/operator/ceph/object/config.go | 2 +-
pkg/operator/ceph/object/config_test.go | 2 +-
pkg/operator/ceph/object/controller_test.go | 16 +--
pkg/operator/ceph/object/objectstore.go | 26 +---
pkg/operator/ceph/object/objectstore_test.go | 6 -
pkg/operator/ceph/object/spec.go | 6 -
pkg/operator/ceph/object/spec_test.go | 22 ++--
pkg/operator/ceph/version/version.go | 18 +--
pkg/operator/ceph/version/version_test.go | 31 ++---
tests/framework/installer/ceph_installer.go | 14 +-
tests/integration/ceph_auth_keystone_test.go | 2 +-
tests/integration/ceph_helm_test.go | 2 +-
tests/integration/ceph_multi_cluster_test.go | 4 +-
tests/integration/ceph_upgrade_test.go | 49 +------
67 files changed, 236 insertions(+), 531 deletions(-)
diff --git a/.github/workflows/daily-nightly-jobs.yml b/.github/workflows/daily-nightly-jobs.yml
index 65fd14ccb35d..a8577c38e0cf 100644
--- a/.github/workflows/daily-nightly-jobs.yml
+++ b/.github/workflows/daily-nightly-jobs.yml
@@ -110,46 +110,6 @@ jobs:
if: always()
run: sudo rm -rf /usr/bin/yq
- smoke-suite-quincy-devel:
- if: github.repository == 'rook/rook'
- runs-on: ubuntu-22.04
- steps:
- - name: checkout
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- with:
- fetch-depth: 0
-
- - name: consider debugging
- uses: ./.github/workflows/tmate_debug
- with:
- use-tmate: ${{ secrets.USE_TMATE }}
-
- - name: setup cluster resources
- uses: ./.github/workflows/integration-test-config-latest-k8s
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- kubernetes-version: "1.28.4"
-
- - name: TestCephSmokeSuite
- run: |
- export DEVICE_FILTER=$(tests/scripts/github-action-helper.sh find_extra_block_dev)
- SKIP_CLEANUP_POLICY=false CEPH_SUITE_VERSION="quincy-devel" go test -v -timeout 1800s -run TestCephSmokeSuite github.com/rook/rook/tests/integration
-
- - name: collect common logs
- if: always()
- run: |
- export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/"
- export CLUSTER_NAMESPACE="smoke-ns"
- export OPERATOR_NAMESPACE="smoke-ns-system"
- tests/scripts/collect-logs.sh
-
- - name: Artifact
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
- if: failure()
- with:
- name: ceph-smoke-suite-quincy-artifact
- path: /home/runner/work/rook/rook/tests/integration/_output/tests/
-
smoke-suite-reef-devel:
if: github.repository == 'rook/rook'
runs-on: ubuntu-22.04
@@ -270,46 +230,6 @@ jobs:
name: ceph-smoke-suite-master-artifact
path: /home/runner/work/rook/rook/tests/integration/_output/tests/
- object-suite-quincy-devel:
- if: github.repository == 'rook/rook'
- runs-on: ubuntu-22.04
- steps:
- - name: checkout
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- with:
- fetch-depth: 0
-
- - name: consider debugging
- uses: ./.github/workflows/tmate_debug
- with:
- use-tmate: ${{ secrets.USE_TMATE }}
-
- - name: setup cluster resources
- uses: ./.github/workflows/integration-test-config-latest-k8s
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- kubernetes-version: "1.28.4"
-
- - name: TestCephObjectSuite
- run: |
- export DEVICE_FILTER=$(tests/scripts/github-action-helper.sh find_extra_block_dev)
- SKIP_CLEANUP_POLICY=false CEPH_SUITE_VERSION="quincy-devel" go test -v -timeout 1800s -failfast -run TestCephObjectSuite github.com/rook/rook/tests/integration
-
- - name: collect common logs
- if: always()
- run: |
- export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/"
- export CLUSTER_NAMESPACE="object-ns"
- export OPERATOR_NAMESPACE="object-ns-system"
- tests/scripts/collect-logs.sh
-
- - name: Artifact
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
- if: failure()
- with:
- name: ceph-object-suite-quincy-artifact
- path: /home/runner/work/rook/rook/tests/integration/_output/tests/
-
object-suite-ceph-main:
if: github.repository == 'rook/rook'
runs-on: ubuntu-22.04
@@ -431,49 +351,9 @@ jobs:
name: ceph-upgrade-suite-reef-artifact
path: /home/runner/work/rook/rook/tests/integration/_output/tests/
- upgrade-from-quincy-stable-to-quincy-devel:
- if: github.repository == 'rook/rook'
- runs-on: ubuntu-22.04
- steps:
- - name: checkout
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- with:
- fetch-depth: 0
-
- - name: consider debugging
- uses: ./.github/workflows/tmate_debug
- with:
- use-tmate: ${{ secrets.USE_TMATE }}
-
- - name: setup cluster resources
- uses: ./.github/workflows/integration-test-config-latest-k8s
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- kubernetes-version: "1.28.4"
-
- - name: TestCephUpgradeSuite
- run: |
- export DEVICE_FILTER=$(tests/scripts/github-action-helper.sh find_extra_block_dev)
- go test -v -timeout 1800s -failfast -run TestCephUpgradeSuite/TestUpgradeCephToQuincyDevel github.com/rook/rook/tests/integration
-
- - name: collect common logs
- if: always()
- run: |
- export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/"
- export CLUSTER_NAMESPACE="upgrade"
- export OPERATOR_NAMESPACE="upgrade-system"
- tests/scripts/collect-logs.sh
-
- - name: Artifact
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
- if: failure()
- with:
- name: ceph-upgrade-suite-quincy-artifact
- path: /home/runner/work/rook/rook/tests/integration/_output/tests/
-
canary-tests:
if: github.repository == 'rook/rook'
uses: ./.github/workflows/canary-integration-test.yml
with:
- ceph_images: '["quay.io/ceph/ceph:v18", "quay.io/ceph/daemon-base:latest-main-devel", "quay.io/ceph/daemon-base:latest-quincy-devel", "quay.io/ceph/daemon-base:latest-reef-devel", "quay.io/ceph/daemon-base:latest-squid-devel"]'
+ ceph_images: '["quay.io/ceph/ceph:v18", "quay.io/ceph/daemon-base:latest-main-devel", "quay.io/ceph/daemon-base:latest-reef-devel", "quay.io/ceph/daemon-base:latest-squid-devel"]'
secrets: inherit
diff --git a/Documentation/CRDs/Cluster/ceph-cluster-crd.md b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
index 38edb30d8eb0..fce6603648a1 100755
--- a/Documentation/CRDs/Cluster/ceph-cluster-crd.md
+++ b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
@@ -29,9 +29,9 @@ Settings can be specified at the global level to apply to the cluster as a whole
* `image`: The image used for running the ceph daemons. For example, `quay.io/ceph/ceph:v18.2.4`. For more details read the [container images section](#ceph-container-images).
For the latest ceph images, see the [Ceph DockerHub](https://hub.docker.com/r/ceph/ceph/tags/).
To ensure a consistent version of the image is running across all nodes in the cluster, it is recommended to use a very specific image version.
- Tags also exist that would give the latest version, but they are only recommended for test environments. For example, the tag `v17` will be updated each time a new Quincy build is released.
- Using the `v17` tag is not recommended in production because it may lead to inconsistent versions of the image running across different nodes in the cluster.
- * `allowUnsupported`: If `true`, allow an unsupported major version of the Ceph release. Currently `quincy` and `reef` are supported. Future versions such as `squid` (v19) would require this to be set to `true`. Should be set to `false` in production.
+ Tags also exist that would give the latest version, but they are only recommended for test environments. For example, the tag `v19` will be updated each time a new Squid build is released.
+ Using the general `v19` tag is not recommended in production because it may lead to inconsistent versions of the image running across different nodes in the cluster.
+ * `allowUnsupported`: If `true`, allow an unsupported major version of the Ceph release. Currently Reef and Squid are supported. Future versions such as Tentacle (v20) would require this to be set to `true`. Should be set to `false` in production.
* `imagePullPolicy`: The image pull policy for the ceph daemon pods. Possible values are `Always`, `IfNotPresent`, and `Never`. The default is `IfNotPresent`.
* `dataDirHostPath`: The path on the host ([hostPath](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath)) where config and data should be stored for each of the services. If the directory does not exist, it will be created. Because this directory persists on the host, it will remain after pods are deleted. Following paths and any of their subpaths **must not be used**: `/etc/ceph`, `/rook` or `/var/log/ceph`.
* **WARNING**: For test scenarios, if you delete a cluster and start a new cluster on the same hosts, the path used by `dataDirHostPath` must be deleted. Otherwise, stale keys and other config will remain from the previous cluster and the new mons will fail to start.
@@ -120,10 +120,10 @@ These are general purpose Ceph container with all necessary daemons and dependen
| TAG | MEANING |
| -------------------- | --------------------------------------------------------- |
-| vRELNUM | Latest release in this series (e.g., **v17** = Quincy) |
-| vRELNUM.Y | Latest stable release in this stable series (e.g., v17.2) |
-| vRELNUM.Y.Z | A specific release (e.g., v18.2.4) |
-| vRELNUM.Y.Z-YYYYMMDD | A specific build (e.g., v18.2.4-20240724) |
+| vRELNUM | Latest release in this series (e.g., **v19** = Squid) |
+| vRELNUM.Y | Latest stable release in this stable series (e.g., v19.2) |
+| vRELNUM.Y.Z | A specific release (e.g., v19.2.0) |
+| vRELNUM.Y.Z-YYYYMMDD | A specific build (e.g., v19.2.0-20240927) |
A specific tag will contain a specific release of Ceph as well as security fixes from the Operating System.
diff --git a/Documentation/CRDs/Cluster/external-cluster/external-cluster.md b/Documentation/CRDs/Cluster/external-cluster/external-cluster.md
index 28dd279bfbf1..68b965fcedce 100644
--- a/Documentation/CRDs/Cluster/external-cluster/external-cluster.md
+++ b/Documentation/CRDs/Cluster/external-cluster/external-cluster.md
@@ -17,7 +17,7 @@ In external mode, Rook will provide the configuration for the CSI driver and oth
Create the desired types of storage in the provider Ceph cluster:
* [RBD pools](https://docs.ceph.com/en/latest/rados/operations/pools/#create-a-pool)
-* [CephFS filesystem](https://docs.ceph.com/en/quincy/cephfs/createfs/)
+* [CephFS filesystem](https://docs.ceph.com/en/latest/cephfs/createfs/)
## Connect the external Ceph Provider cluster to the Rook consumer cluster
diff --git a/Documentation/CRDs/Cluster/external-cluster/provider-export.md b/Documentation/CRDs/Cluster/external-cluster/provider-export.md
index 7f3b3e8e7abf..db64ab9584f0 100644
--- a/Documentation/CRDs/Cluster/external-cluster/provider-export.md
+++ b/Documentation/CRDs/Cluster/external-cluster/provider-export.md
@@ -105,7 +105,7 @@ python3 create-external-cluster-resources.py --cephfs-filesystem-name <filesystem-name> --format bash --rgw-endpoint <rgw-endpoint> --rgw-realm-name <rgw-realm-name> --rgw-zonegroup-name <rgw-zonegroup-name> --rgw-zone-name <rgw-zone-name>
diff --git a/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md b/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md
index 4b4efb8f0418..9c928a372983 100644
--- a/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md
+++ b/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md
@@ -148,7 +148,7 @@ The protocols section is divided into two parts:
In the `s3` section of the `protocols` section the following options can be configured:
* `authKeystone`: Whether S3 should also be authenticated using Keystone (`true`) or not (`false`). If set to `false` the default S3 auth will be used.
-* `enabled`: Whether to enable S3 (`true`) or not (`false`). The default is `true` even if the section is not listed at all! Please note that S3 should not be disabled in a [Ceph Multi Site configuration](https://docs.ceph.com/en/quincy/radosgw/multisite).
+* `enabled`: Whether to enable S3 (`true`) or not (`false`). The default is `true` even if the section is not listed at all! Please note that S3 should not be disabled in a [Ceph Multi Site configuration](https://docs.ceph.com/en/latest/radosgw/multisite).
#### protocols/swift settings
@@ -332,9 +332,7 @@ vault kv put rook/<key-name> key=$(openssl rand -base64 32) # kv engine
vault write -f transit/keys/<key-name> exportable=true # transit engine
```
-* TLS authentication with custom certificates between Vault and CephObjectStore RGWs are supported from ceph v16.2.6 onwards
* `tokenSecretName` can be (and often will be) the same for both kms and s3 configurations.
-* `AWS-SSE:S3` requires Ceph Quincy v17.2.3 or later.
## Deleting a CephObjectStore
diff --git a/Documentation/CRDs/ceph-nfs-crd.md b/Documentation/CRDs/ceph-nfs-crd.md
index 8accbbc7c884..16721d8f4f44 100644
--- a/Documentation/CRDs/ceph-nfs-crd.md
+++ b/Documentation/CRDs/ceph-nfs-crd.md
@@ -194,15 +194,3 @@ the size of the cluster.
not always happen due to the Kubernetes scheduler.
* Workaround: It is safest to run only a single NFS server, but we do not limit this if it
benefits your use case.
-
-### Ceph v17.2.1
-
-* Ceph NFS management with the Rook mgr module enabled has a breaking regression with the Ceph
- Quincy v17.2.1 release.
- * Workaround: Leave Ceph's Rook orchestrator mgr module disabled. If you have enabled it, you must
- disable it using the snippet below from the toolbox.
-
- ```console
- ceph orch set backend ""
- ceph mgr module disable rook
- ```
diff --git a/Documentation/CRDs/specification.md b/Documentation/CRDs/specification.md
index ebd043d22f6e..888db90878a4 100644
--- a/Documentation/CRDs/specification.md
+++ b/Documentation/CRDs/specification.md
@@ -8892,7 +8892,7 @@ map[github.com/rook/rook/pkg/apis/ceph.rook.io/v1.CephNetworkType]string
networks when the “multus” network provider is used. This config section is not used for
other network providers.
Valid keys are “public” and “cluster”. Refer to Ceph networking documentation for more:
-https://docs.ceph.com/en/reef/rados/configuration/network-config-ref/
+https://docs.ceph.com/en/latest/rados/configuration/network-config-ref/
Refer to Multus network annotation documentation for help selecting values:
https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/how-to-use.md#run-pod-with-network-annotation
Rook will make a best-effort attempt to automatically detect CIDR address ranges for given
@@ -9574,8 +9574,7 @@ The object store’s advertiseEndpoint and Kubernetes service endpoint, plus
Each DNS name must be valid according RFC-1123.
If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
wildcard itself in the list of hostnames.
-E.g., use “mystore.example.com” instead of “*.mystore.example.com”.
-The feature is supported only for Ceph v18 and later versions.
+E.g., use “mystore.example.com” instead of “*.mystore.example.com”.
@@ -10169,7 +10168,7 @@ string
(Optional)
- Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/quincy/radosgw/rgw-cache/#cache-api
+Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/latest/radosgw/rgw-cache/#cache-api
|
diff --git a/Documentation/Storage-Configuration/NFS/nfs-security.md b/Documentation/Storage-Configuration/NFS/nfs-security.md
index 4a3aa2baca2c..decd1c0e9a90 100644
--- a/Documentation/Storage-Configuration/NFS/nfs-security.md
+++ b/Documentation/Storage-Configuration/NFS/nfs-security.md
@@ -26,11 +26,6 @@ users stored in LDAP can be associated with NFS users and vice versa.
mapping from a number of sources including LDAP, Active Directory, and FreeIPA. Currently, only
LDAP has been tested.
-!!! attention
- The Ceph container image must have the `sssd-client` package installed to support SSSD. This
- package is included in `quay.io/ceph/ceph` in v17.2.4 and newer. For older Ceph versions you may
- build your own Ceph image which adds `RUN yum install sssd-client && yum clean all`.
-
#### SSSD configuration
SSSD requires a configuration file in order to configure its connection to the user ID mapping
diff --git a/Documentation/Storage-Configuration/NFS/nfs.md b/Documentation/Storage-Configuration/NFS/nfs.md
index 5d15154fb12b..176427cab791 100644
--- a/Documentation/Storage-Configuration/NFS/nfs.md
+++ b/Documentation/Storage-Configuration/NFS/nfs.md
@@ -64,11 +64,7 @@ The Ceph CLI can be used from the Rook toolbox pod to create and manage NFS expo
ensure the necessary Ceph mgr modules are enabled, if necessary, and that the Ceph orchestrator
backend is set to Rook.
-#### Enable the Ceph orchestrator if necessary
-
-* Required for Ceph v16.2.7 and below
-* Optional for Ceph v16.2.8 and above
-* Must be disabled for Ceph v17.2.1 due to a [Ceph regression](../../CRDs/ceph-nfs-crd.md#ceph-v1721)
+#### Enable the Ceph orchestrator (optional)
```console
ceph mgr module enable rook
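# Assumed companion step (from Rook's NFS docs, not shown in this hunk): point the
# orchestrator backend at Rook once the mgr module is enabled.
ceph orch set backend rook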
diff --git a/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-notifications.md b/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-notifications.md
index 6c29b3bdf9d1..4df51db963ee 100644
--- a/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-notifications.md
+++ b/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-notifications.md
@@ -79,7 +79,7 @@ spec:
8. `http` (optional) holds the spec for an HTTP endpoint. The format of the URI would be: `http[s]://<fqdn>[:<port>][/<resource>]`
+ port defaults to: 80/443 for HTTP/S accordingly
9. `disableVerifySSL` indicates whether the RGW is going to verify the SSL certificate of the HTTP server in case HTTPS is used ("false" by default)
-10. `sendCloudEvents`: (optional) send the notifications with the [CloudEvents header](https://github.com/cloudevents/spec/blob/main/cloudevents/adapters/aws-s3.md). Supported for Ceph Quincy (v17) or newer ("false" by default)
+10. `sendCloudEvents`: (optional) send the notifications with the [CloudEvents header](https://github.com/cloudevents/spec/blob/main/cloudevents/adapters/aws-s3.md). ("false" by default)
11. `amqp` (optional) holds the spec for an AMQP endpoint. The format of the URI would be: `amqp[s]://[<user>:<password>@]<fqdn>[:<port>][/<vhost>]`
+ port defaults to: 5672/5671 for AMQP/S accordingly
+ user/password defaults to: guest/guest
diff --git a/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md b/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
index 4c291a0c2409..3ada73c7eb94 100644
--- a/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
+++ b/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
@@ -23,7 +23,7 @@ Rook can configure the Ceph Object Store for several different scenarios. See ea
Rook has the ability to either deploy an object store in Kubernetes or to connect to an external RGW service.
Most commonly, the object store will be configured in Kubernetes by Rook.
-Alternatively see the [external section](#connect-to-an-external-object-store) to consume an existing Ceph cluster with [Rados Gateways](https://docs.ceph.com/en/quincy/radosgw/index.html) from Rook.
+Alternatively see the [external section](#connect-to-an-external-object-store) to consume an existing Ceph cluster with [Rados Gateways](https://docs.ceph.com/en/latest/radosgw/index.html) from Rook.
### Create a Local Object Store with S3
@@ -198,7 +198,7 @@ This section contains a guide on how to configure [RGW's pool placement and stor
Object Storage API allows users to override where bucket data will be stored during bucket creation, with the `<LocationConstraint>` parameter in the S3 API and the `X-Storage-Policy` header in SWIFT. Similarly, users can override where object data will be stored by setting `X-Amz-Storage-Class` and `X-Object-Storage-Class` during object creation.
-To enable this feature, configure `poolPlacements` representing a list of possible bucket data locations.
+To enable this feature, configure `poolPlacements` representing a list of possible bucket data locations.
Each `poolPlacement` must have:
* a **unique** `name` to refer to it in `<LocationConstraint>` or `X-Storage-Policy`. A placement with reserved name `default` will be used by default if no location constraint is provided.
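A hedged S3-side illustration of the selection described above; the bucket name, placement name, and storage class are hypothetical, and the `LocationConstraint` value must match a configured placement `name`:

```console
# choose a pool placement at bucket creation (LocationConstraint)
aws s3api create-bucket --bucket my-bucket --create-bucket-configuration LocationConstraint=my-placement
# choose a storage class at object creation (sent as the X-Amz-Storage-Class header)
aws s3api put-object --bucket my-bucket --key obj1 --body ./obj1 --storage-class MY_STORAGE_CLASS
```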
diff --git a/Documentation/Upgrade/ceph-upgrade.md b/Documentation/Upgrade/ceph-upgrade.md
index 0ea0e3a5109c..5205eac2b7cd 100644
--- a/Documentation/Upgrade/ceph-upgrade.md
+++ b/Documentation/Upgrade/ceph-upgrade.md
@@ -24,23 +24,15 @@ until all the daemons have been updated.
## Supported Versions
-Rook v1.15 supports the following Ceph versions:
+Rook v1.16 supports the following Ceph versions:
+* Ceph Squid v19.2.0 or newer
* Ceph Reef v18.2.0 or newer
-* Ceph Quincy v17.2.0 or newer
!!! important
When an update is requested, the operator will check Ceph's status,
**if it is in `HEALTH_ERR` the operator will refuse to proceed with the upgrade.**
-!!! warning
- Ceph v17.2.2 has a blocking issue when running with Rook. Use v17.2.3 or newer when possible.
-
-### CephNFS User Consideration
-
-Ceph Quincy v17.2.1 has a potentially breaking regression with CephNFS. See the NFS documentation's
-[known issue](../CRDs/ceph-nfs-crd.md#ceph-v1721) for more detail.
-
### Ceph Images
Official Ceph container images can be found on [Quay](https://quay.io/repository/ceph/ceph?tab=tags).
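To confirm which releases the daemons are actually running before and after an upgrade, `ceph versions` from the toolbox gives a per-daemon summary; a sketch of its output, with hash and counts purely illustrative:

```console
ceph versions
{
    "overall": {
        "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 2
    }
}
```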
diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md
index 779cd62017d5..111d57e0f74c 100644
--- a/PendingReleaseNotes.md
+++ b/PendingReleaseNotes.md
@@ -2,5 +2,6 @@
## Breaking Changes
+- Removed support for Ceph Quincy (v17) since it has reached end of life
## Features
diff --git a/deploy/charts/rook-ceph-cluster/values.yaml b/deploy/charts/rook-ceph-cluster/values.yaml
index f154e012628b..7de9fdebb169 100644
--- a/deploy/charts/rook-ceph-cluster/values.yaml
+++ b/deploy/charts/rook-ceph-cluster/values.yaml
@@ -89,14 +89,14 @@ cephClusterSpec:
# For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
- # v17 is Quincy, v18 is Reef.
+ # v18 is Reef, v19 is Squid
# RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.4-20240724
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
image: quay.io/ceph/ceph:v18.2.4
- # Whether to allow unsupported versions of Ceph. Currently `quincy`, and `reef` are supported.
- # Future versions such as `squid` (v19) would require this to be set to `true`.
+ # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
+ # Future versions such as Tentacle (v20) would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
@@ -169,7 +169,7 @@ cephClusterSpec:
encryption:
enabled: false
# Whether to compress the data in transit across the wire. The default is false.
- # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
+ # The kernel requirements above for encryption also apply to compression.
compression:
enabled: false
# Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml
index 0092ad799fad..214888caadb4 100644
--- a/deploy/charts/rook-ceph/templates/resources.yaml
+++ b/deploy/charts/rook-ceph/templates/resources.yaml
@@ -2564,7 +2564,7 @@ spec:
other network providers.
Valid keys are "public" and "cluster". Refer to Ceph networking documentation for more:
- https://docs.ceph.com/en/reef/rados/configuration/network-config-ref/
+ https://docs.ceph.com/en/latest/rados/configuration/network-config-ref/
Refer to Multus network annotation documentation for help selecting values:
https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/how-to-use.md#run-pod-with-network-annotation
@@ -12046,7 +12046,6 @@ spec:
If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
wildcard itself in the list of hostnames.
E.g., use "mystore.example.com" instead of "*.mystore.example.com".
- The feature is supported only for Ceph v18 and later versions.
items:
type: string
type: array
@@ -12527,7 +12526,7 @@ spec:
nullable: true
properties:
amz-cache:
- description: Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/quincy/radosgw/rgw-cache/#cache-api
+ description: Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/latest/radosgw/rgw-cache/#cache-api
enum:
- '*'
- read
diff --git a/deploy/examples/cluster.yaml b/deploy/examples/cluster.yaml
index a01f99f396b0..345897fb58bd 100644
--- a/deploy/examples/cluster.yaml
+++ b/deploy/examples/cluster.yaml
@@ -16,14 +16,14 @@ metadata:
spec:
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
- # v17 is Quincy, v18 is Reef.
- # RECOMMENDATION: In production, use a specific version tag instead of the general v17 flag, which pulls the latest release and could result in different
+ # v18 is Reef, v19 is Squid
+ # RECOMMENDATION: In production, use a specific version tag instead of the general v19 flag, which pulls the latest release and could result in different
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.4-20240724
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
image: quay.io/ceph/ceph:v18.2.4
- # Whether to allow unsupported versions of Ceph. Currently `quincy` and `reef` are supported.
- # Future versions such as `squid` (v19) would require this to be set to `true`.
+ # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
+ # Future versions such as Tentacle (v20) would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
# The path on the host where configuration files will be persisted. Must be specified.
@@ -322,7 +322,7 @@ spec:
cephfs:
# Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
# kernelMountOptions: ""
- # Set CephFS Fuse mount options to use https://docs.ceph.com/en/quincy/man/8/ceph-fuse/#options.
+ # Set CephFS Fuse mount options to use https://docs.ceph.com/en/latest/man/8/ceph-fuse/#options.
# fuseMountOptions: ""
# healthChecks
diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml
index aa8759c17784..dc75ac22e092 100644
--- a/deploy/examples/crds.yaml
+++ b/deploy/examples/crds.yaml
@@ -2562,7 +2562,7 @@ spec:
other network providers.
Valid keys are "public" and "cluster". Refer to Ceph networking documentation for more:
- https://docs.ceph.com/en/reef/rados/configuration/network-config-ref/
+ https://docs.ceph.com/en/latest/rados/configuration/network-config-ref/
Refer to Multus network annotation documentation for help selecting values:
https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/how-to-use.md#run-pod-with-network-annotation
@@ -12037,7 +12037,6 @@ spec:
If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
wildcard itself in the list of hostnames.
E.g., use "mystore.example.com" instead of "*.mystore.example.com".
- The feature is supported only for Ceph v18 and later versions.
items:
type: string
type: array
@@ -12517,7 +12516,7 @@ spec:
nullable: true
properties:
amz-cache:
- description: Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/quincy/radosgw/rgw-cache/#cache-api
+ description: Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/latest/radosgw/rgw-cache/#cache-api
enum:
- '*'
- read
diff --git a/design/ceph/ceph-nfs-ganesha.md b/design/ceph/ceph-nfs-ganesha.md
index a26647f62af3..9baa05e1eda6 100644
--- a/design/ceph/ceph-nfs-ganesha.md
+++ b/design/ceph/ceph-nfs-ganesha.md
@@ -218,9 +218,7 @@ Directory, and FreeIPA.
Prototype information detailed on Rook blog:
https://blog.rook.io/prototyping-an-nfs-connection-to-ldap-using-sssd-7c27f624f1a4
-NFS-Ganesha (via libraries within its container) is the client to SSSD. As of Ceph v17.2.3, the Ceph
-container image does not have the `sssd-client` package installed which is required for supporting
-SSSD. It is available starting from Ceph v17.2.4.
+NFS-Ganesha (via libraries within its container) is the client to SSSD.
The following directories must be shared between SSSD and the NFS-Ganesha container:
- `/var/lib/sss/pipes`: this directory holds the sockets used to communicate between client and SSSD
diff --git a/design/ceph/object/ceph-sse-s3.md b/design/ceph/object/ceph-sse-s3.md
index 4bad91c8acd7..73e6391d0dde 100644
--- a/design/ceph/object/ceph-sse-s3.md
+++ b/design/ceph/object/ceph-sse-s3.md
@@ -9,8 +9,6 @@ AWS server side encryption SSE-S3 support for RGW
## Summary
The S3 protocol supports three different types of [server side encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html): SSE-C, SSE-KMS and SSE-S3. For the last two, the RGW server needs to be configured with external services such as [vault](https://www.vaultproject.io/). Currently Rook configures RGW with `SSE-KMS` options to handle S3 requests with the `sse:kms` header. Recently, support for handling `sse:s3` was added to RGW, so Rook will now provide the option to configure RGW with `sse:s3`.
-The `sse:s3` is supported only from Ceph v17 an onwards, so this feature can only be enabled for Quincy or newer.
-
### Goals
Configure RGW with `SSE-S3` options, so that RGW can handle request with `sse:s3` headers.
diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go
index d06c20081f95..27eba7c978e6 100755
--- a/pkg/apis/ceph.rook.io/v1/types.go
+++ b/pkg/apis/ceph.rook.io/v1/types.go
@@ -1863,7 +1863,6 @@ type ObjectStoreHostingSpec struct {
// If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
// wildcard itself in the list of hostnames.
// E.g., use "mystore.example.com" instead of "*.mystore.example.com".
- // The feature is supported only for Ceph v18 and later versions.
// +optional
DNSNames []string `json:"dnsNames,omitempty"`
}
@@ -1985,7 +1984,7 @@ type ObjectUserCapSpec struct {
Info string `json:"info,omitempty"`
// +optional
// +kubebuilder:validation:Enum={"*","read","write","read, write"}
- // Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/quincy/radosgw/rgw-cache/#cache-api
+ // Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/latest/radosgw/rgw-cache/#cache-api
AMZCache string `json:"amz-cache,omitempty"`
// +optional
// +kubebuilder:validation:Enum={"*","read","write","read, write"}
@@ -2627,7 +2626,7 @@ type NetworkSpec struct {
// other network providers.
//
// Valid keys are "public" and "cluster". Refer to Ceph networking documentation for more:
- // https://docs.ceph.com/en/reef/rados/configuration/network-config-ref/
+ // https://docs.ceph.com/en/latest/rados/configuration/network-config-ref/
//
// Refer to Multus network annotation documentation for help selecting values:
// https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/how-to-use.md#run-pod-with-network-annotation
diff --git a/pkg/daemon/ceph/client/config_test.go b/pkg/daemon/ceph/client/config_test.go
index c3e4496ae683..cfb2afa20a6a 100644
--- a/pkg/daemon/ceph/client/config_test.go
+++ b/pkg/daemon/ceph/client/config_test.go
@@ -29,7 +29,6 @@ import (
"github.com/go-ini/ini"
"github.com/rook/rook/pkg/clusterd"
- cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/stretchr/testify/assert"
)
@@ -42,7 +41,6 @@ func TestCreateDefaultCephConfig(t *testing.T) {
"node0": {Name: "mon0", Endpoint: "10.0.0.1:6789"},
"node1": {Name: "mon1", Endpoint: "10.0.0.2:6789"},
},
- CephVersion: cephver.Quincy,
}
// start with INFO level logging
@@ -94,9 +92,8 @@ func TestGenerateConfigFile(t *testing.T) {
Monitors: map[string]*MonInfo{
"node0": {Name: "mon0", Endpoint: "10.0.0.1:6789"},
},
- CephVersion: cephver.Quincy,
- CephCred: CephCred{Username: "admin", Secret: "mysecret"},
- Context: ctx,
+ CephCred: CephCred{Username: "admin", Secret: "mysecret"},
+ Context: ctx,
}
isInitialized := clusterInfo.IsInitialized()
diff --git a/pkg/daemon/ceph/client/upgrade.go b/pkg/daemon/ceph/client/upgrade.go
index 9fea46170209..0b9ffcc21a36 100644
--- a/pkg/daemon/ceph/client/upgrade.go
+++ b/pkg/daemon/ceph/client/upgrade.go
@@ -131,7 +131,7 @@ func OkToStop(context *clusterd.Context, clusterInfo *ClusterInfo, deployment, d
// if we have less than 3 mons we skip the check and do best-effort
// we do less than 3 because during the initial bootstrap the mon sequence is updated too
// so running the check on 2/3 mon fails
- // versions.Mon looks like this map[ceph version 17.0.0-12-g6c8fb92 (6c8fb920cb1d862f36ee852ed849a15f9a50bd68) quincy (dev):1]
+ // versions.Mon looks like this map[ceph version 19.0.0-12-g6c8fb92 (6c8fb920cb1d862f36ee852ed849a15f9a50bd68) squid (dev):1]
// now looping over a single element since we can't address the key directly (we don't know its name)
for _, monCount := range versions.Mon {
if monCount < 3 {
@@ -215,7 +215,7 @@ func okToContinueMDSDaemon(context *clusterd.Context, clusterInfo *ClusterInfo,
//
// "mon": {
// "ceph version 18.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) reef (stable)": 2,
-// "ceph version 17.2.6 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 1
+// "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 1
// }
//
// In the case we will pick: "ceph version 18.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) reef (stable)": 2,
diff --git a/pkg/daemon/ceph/client/upgrade_test.go b/pkg/daemon/ceph/client/upgrade_test.go
index 6630fd196886..45329915cce4 100644
--- a/pkg/daemon/ceph/client/upgrade_test.go
+++ b/pkg/daemon/ceph/client/upgrade_test.go
@@ -62,7 +62,7 @@ func TestEnableReleaseOSDFunctionality(t *testing.T) {
}
context := &clusterd.Context{Executor: executor}
- err := EnableReleaseOSDFunctionality(context, AdminTestClusterInfo("mycluster"), "quincy")
+ err := EnableReleaseOSDFunctionality(context, AdminTestClusterInfo("mycluster"), "squid")
assert.NoError(t, err)
}
@@ -129,7 +129,7 @@ func TestDaemonMapEntry(t *testing.T) {
{
"mon": {
"ceph version 18.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) reef (stable)": 1,
- "ceph version 17.2.7 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2
+ "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 2
}
}`)
diff --git a/pkg/daemon/ceph/osd/daemon.go b/pkg/daemon/ceph/osd/daemon.go
index 61063aa8328b..1ede9148f407 100644
--- a/pkg/daemon/ceph/osd/daemon.go
+++ b/pkg/daemon/ceph/osd/daemon.go
@@ -33,7 +33,6 @@ import (
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/daemon/ceph/client"
oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd"
- cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/rook/rook/pkg/util/sys"
)
@@ -395,13 +394,6 @@ func getAvailableDevices(context *clusterd.Context, agent *OsdAgent) (*DeviceOsd
}
}
- if device.Type == sys.LoopType {
- if !agent.clusterInfo.CephVersion.IsAtLeast(cephver.CephVersion{Major: 17, Minor: 2, Extra: 4}) {
- logger.Infof("partition %q is not picked because loop devices are not allowed on Ceph clusters older than v17.2.4", device.Name)
- continue
- }
- }
-
// Check if the desired device is available
//
// We need to use the /dev path, provided by the NAME property from "lsblk --paths",
diff --git a/pkg/daemon/ceph/osd/daemon_test.go b/pkg/daemon/ceph/osd/daemon_test.go
index bf6dcbd5e4cd..eabc974f3016 100644
--- a/pkg/daemon/ceph/osd/daemon_test.go
+++ b/pkg/daemon/ceph/osd/daemon_test.go
@@ -382,8 +382,6 @@ NAME="sdb1" SIZE="30" TYPE="part" PKNAME="sdb"`, nil
{Name: "loop0", RealPath: "/dev/loop0", Type: sys.LoopType},
}
- version := cephver.Quincy
-
// select all devices, including nvme01 for metadata
pvcBackedOSD := false
agent := &OsdAgent{
@@ -392,7 +390,6 @@ NAME="sdb1" SIZE="30" TYPE="part" PKNAME="sdb"`, nil
pvcBacked: pvcBackedOSD,
clusterInfo: &cephclient.ClusterInfo{},
}
- agent.clusterInfo.CephVersion = version
mapping, err := getAvailableDevices(context, agent)
assert.Nil(t, err)
assert.Equal(t, 7, len(mapping.Entries))
@@ -536,19 +533,8 @@ NAME="sdb1" SIZE="30" TYPE="part" PKNAME="sdb"`, nil
{Name: "sda", DevLinks: "/dev/disk/by-id/scsi-0123 /dev/disk/by-path/pci-0:1:2:3-scsi-1", RealPath: "/dev/sda"},
}
- // loop device: Ceph version is too old
- agent.pvcBacked = false
- agent.devices = []DesiredDevice{{Name: "loop0"}}
- mapping, err = getAvailableDevices(context, agent)
- assert.Nil(t, err)
- assert.Equal(t, 0, len(mapping.Entries))
-
// loop device: specify a loop device
- agent.clusterInfo.CephVersion = cephver.CephVersion{
- Major: 17,
- Minor: 2,
- Extra: 4,
- }
+ agent.clusterInfo.CephVersion = cephver.Squid
agent.pvcBacked = false
agent.devices = []DesiredDevice{{Name: "loop0"}}
mapping, err = getAvailableDevices(context, agent)
@@ -574,7 +560,6 @@ NAME="sdb1" SIZE="30" TYPE="part" PKNAME="sdb"`, nil
mapping, err = getAvailableDevices(context, agent)
assert.Nil(t, err)
assert.Equal(t, 0, len(mapping.Entries))
- agent.clusterInfo.CephVersion = cephver.Quincy
}
func TestGetVolumeGroupName(t *testing.T) {
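For context, a hedged sketch of the version helpers the tests above rely on; the CephVersion field names match the literals used elsewhere in this patch, while the exact values of the package-level variables are assumptions:

```go
package version

// CephVersion identifies a Ceph release by numeric version and commit.
type CephVersion struct {
	Major, Minor, Extra, Build int
	CommitID                   string
}

// Release markers compared against the running cluster; with Quincy (v17) removed,
// only Reef and Squid remain supported.
var (
	Reef  = CephVersion{Major: 18}
	Squid = CephVersion{Major: 19}
)
```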
diff --git a/pkg/operator/ceph/cluster/cephstatus_test.go b/pkg/operator/ceph/cluster/cephstatus_test.go
index 1dc01a67bef4..d464f3a529bb 100644
--- a/pkg/operator/ceph/cluster/cephstatus_test.go
+++ b/pkg/operator/ceph/cluster/cephstatus_test.go
@@ -29,7 +29,6 @@ import (
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
- "github.com/rook/rook/pkg/operator/ceph/version"
optest "github.com/rook/rook/pkg/operator/test"
exectest "github.com/rook/rook/pkg/util/exec/test"
"github.com/stretchr/testify/assert"
@@ -162,7 +161,6 @@ func TestConfigureHealthSettings(t *testing.T) {
context: &clusterd.Context{},
clusterInfo: cephclient.AdminTestClusterInfo("ns"),
}
- c.clusterInfo.CephVersion = version.Quincy
setGlobalIDReclaim := false
c.context.Executor = &exectest.MockExecutor{
MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) {
diff --git a/pkg/operator/ceph/cluster/mgr/dashboard_test.go b/pkg/operator/ceph/cluster/mgr/dashboard_test.go
index f82ada4d32d3..4fea16c18d75 100644
--- a/pkg/operator/ceph/cluster/mgr/dashboard_test.go
+++ b/pkg/operator/ceph/cluster/mgr/dashboard_test.go
@@ -114,7 +114,7 @@ func TestStartSecureDashboard(t *testing.T) {
ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef()
clusterInfo := &cephclient.ClusterInfo{
Namespace: "myns",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
OwnerInfo: ownerInfo,
Context: ctx,
}
diff --git a/pkg/operator/ceph/cluster/mgr/orchestrator_test.go b/pkg/operator/ceph/cluster/mgr/orchestrator_test.go
index 58c5923aa01c..e2215accf6b0 100644
--- a/pkg/operator/ceph/cluster/mgr/orchestrator_test.go
+++ b/pkg/operator/ceph/cluster/mgr/orchestrator_test.go
@@ -59,7 +59,7 @@ func TestOrchestratorModules(t *testing.T) {
}
clusterInfo := &cephclient.ClusterInfo{
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
Context: context.TODO(),
}
context := &clusterd.Context{Executor: executor}
diff --git a/pkg/operator/ceph/cluster/mon/mon_test.go b/pkg/operator/ceph/cluster/mon/mon_test.go
index 9d27be4014c9..e5918745adc3 100644
--- a/pkg/operator/ceph/cluster/mon/mon_test.go
+++ b/pkg/operator/ceph/cluster/mon/mon_test.go
@@ -190,7 +190,7 @@ func TestStartMonPods(t *testing.T) {
}
// start a basic cluster
- _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
// test annotations
@@ -201,7 +201,7 @@ func TestStartMonPods(t *testing.T) {
validateStart(t, c)
// starting again should be a no-op
- _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
validateStart(t, c)
@@ -215,7 +215,7 @@ func TestOperatorRestart(t *testing.T) {
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// start a basic cluster
- info, err := c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ info, err := c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
assert.NoError(t, info.IsInitialized())
@@ -225,7 +225,7 @@ func TestOperatorRestart(t *testing.T) {
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// starting again should be a no-op, but will not result in an error
- info, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ info, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
assert.NoError(t, info.IsInitialized())
@@ -243,7 +243,7 @@ func TestOperatorRestartHostNetwork(t *testing.T) {
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// start a basic cluster
- info, err := c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ info, err := c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
assert.NoError(t, info.IsInitialized())
@@ -255,7 +255,7 @@ func TestOperatorRestartHostNetwork(t *testing.T) {
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// starting again should be a no-op, but still results in an error
- info, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ info, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
assert.NoError(t, info.IsInitialized(), info)
diff --git a/pkg/operator/ceph/cluster/mon/node_test.go b/pkg/operator/ceph/cluster/mon/node_test.go
index cac949235f50..ba9236f0dccd 100644
--- a/pkg/operator/ceph/cluster/mon/node_test.go
+++ b/pkg/operator/ceph/cluster/mon/node_test.go
@@ -86,7 +86,7 @@ func TestHostNetworkSameNode(t *testing.T) {
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// start a basic cluster
- _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.Error(t, err)
}
@@ -104,7 +104,7 @@ func TestPodMemory(t *testing.T) {
c := newCluster(context, namespace, true, r)
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// start a basic cluster
- _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
// Test REQUEST == LIMIT
@@ -120,7 +120,7 @@ func TestPodMemory(t *testing.T) {
c = newCluster(context, namespace, true, r)
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// start a basic cluster
- _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
// Test LIMIT != REQUEST but obviously LIMIT > REQUEST
@@ -136,7 +136,7 @@ func TestPodMemory(t *testing.T) {
c = newCluster(context, namespace, true, r)
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// start a basic cluster
- _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
// Test valid case where pod resource is set appropriately
@@ -152,7 +152,7 @@ func TestPodMemory(t *testing.T) {
c = newCluster(context, namespace, true, r)
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// start a basic cluster
- _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
// Test no resources were specified on the pod
@@ -160,7 +160,7 @@ func TestPodMemory(t *testing.T) {
c = newCluster(context, namespace, true, r)
c.ClusterInfo = clienttest.CreateTestClusterInfo(1)
// start a basic cluster
- _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Quincy, c.spec)
+ _, err = c.Start(c.ClusterInfo, c.rookImage, cephver.Squid, c.spec)
assert.NoError(t, err)
}
diff --git a/pkg/operator/ceph/cluster/osd/create_test.go b/pkg/operator/ceph/cluster/osd/create_test.go
index 7312d8b95ae7..2939eea27bd4 100644
--- a/pkg/operator/ceph/cluster/osd/create_test.go
+++ b/pkg/operator/ceph/cluster/osd/create_test.go
@@ -310,7 +310,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) {
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo.SetName("mycluster")
clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
@@ -447,7 +447,7 @@ func Test_startProvisioningOverNodes(t *testing.T) {
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo.SetName("mycluster")
clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
diff --git a/pkg/operator/ceph/cluster/osd/integration_test.go b/pkg/operator/ceph/cluster/osd/integration_test.go
index 9ff325f8ae25..e402633ac4d0 100644
--- a/pkg/operator/ceph/cluster/osd/integration_test.go
+++ b/pkg/operator/ceph/cluster/osd/integration_test.go
@@ -591,7 +591,7 @@ func osdIntegrationTestExecutor(t *testing.T, clientset *fake.Clientset, namespa
}
if args[0] == "versions" {
// the update deploy code only cares about the mons from the ceph version command results
- v := `{"mon":{"ceph version 17.2.1 (somehash) quincy (stable)":3}}`
+ v := `{"mon":{"ceph version 19.2.1 (somehash) squid (stable)":3}}`
return v, nil
}
return "", errors.Errorf("unexpected ceph command %q", args)
diff --git a/pkg/operator/ceph/cluster/osd/osd_test.go b/pkg/operator/ceph/cluster/osd/osd_test.go
index de2e62c270e7..1ed4571185f9 100644
--- a/pkg/operator/ceph/cluster/osd/osd_test.go
+++ b/pkg/operator/ceph/cluster/osd/osd_test.go
@@ -118,7 +118,7 @@ func TestStart(t *testing.T) {
client := clientfake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
Context: context.TODO(),
}
clusterInfo.SetName("rook-ceph-test")
@@ -201,7 +201,7 @@ func TestAddRemoveNode(t *testing.T) {
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
Context: ctx,
}
clusterInfo.SetName("rook-ceph-test")
@@ -425,7 +425,7 @@ func TestPostReconcileUpdateOSDProperties(t *testing.T) {
client := clientfake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
Context: context.TODO(),
}
clusterInfo.SetName("rook-ceph-test")
@@ -475,7 +475,7 @@ func TestAddNodeFailure(t *testing.T) {
clusterInfo := &cephclient.ClusterInfo{
Namespace: "ns-add-remove",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
Context: context.TODO(),
}
clusterInfo.SetName("testcluster")
@@ -1085,7 +1085,7 @@ func TestValidateOSDSettings(t *testing.T) {
namespace := "ns"
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
Context: context.TODO(),
}
clusterInfo.SetName("rook-ceph-test")
diff --git a/pkg/operator/ceph/cluster/osd/spec.go b/pkg/operator/ceph/cluster/osd/spec.go
index 33477cdbf88b..386bb9a24719 100644
--- a/pkg/operator/ceph/cluster/osd/spec.go
+++ b/pkg/operator/ceph/cluster/osd/spec.go
@@ -65,7 +65,6 @@ const (
bluestoreBlockName = "block"
bluestoreMetadataName = "block.db"
bluestoreWalName = "block.wal"
- tempEtcCephDir = "/etc/temp-ceph"
osdPortv1 = 6801
osdPortv2 = 6800
)
@@ -105,36 +104,12 @@ set -o nounset # fail if variables are unset
set -o xtrace
OSD_ID="$ROOK_OSD_ID"
-CEPH_FSID=%s
OSD_UUID=%s
OSD_STORE_FLAG="%s"
OSD_DATA_DIR=/var/lib/ceph/osd/ceph-"$OSD_ID"
CV_MODE=%s
DEVICE="$%s"
-# "ceph.conf" must have the "fsid" global configuration to activate encrypted OSDs
-# after the following Ceph's PR is merged.
-# https://github.com/ceph/ceph/commit/25655e5a8829e001adf467511a6bde8142b0a575
-# This limitation will be removed later. After that, we can remove this
-# fsid injection code. Probably a good time is when to remove Quincy support.
-# https://github.com/rook/rook/pull/10333#discussion_r892817877
-cp --no-preserve=mode /etc/temp-ceph/ceph.conf /etc/ceph/ceph.conf
-python3 -c "
-import configparser
-
-config = configparser.ConfigParser()
-config.read('/etc/ceph/ceph.conf')
-
-if not config.has_section('global'):
- config['global'] = {}
-
-if not config.has_option('global','fsid'):
- config['global']['fsid'] = '$CEPH_FSID'
-
-with open('/etc/ceph/ceph.conf', 'w') as configfile:
- config.write(configfile)
-"
-
# create new keyring
ceph -n client.admin auth get-or-create osd."$OSD_ID" mon 'allow profile osd' mgr 'allow profile osd' osd 'allow *' -k /etc/ceph/admin-keyring-store/keyring
@@ -894,7 +869,7 @@ func (c *Cluster) getActivateOSDInitContainer(configDir, namespace, osdID string
volMounts := []v1.VolumeMount{
{Name: activateOSDVolumeName, MountPath: activateOSDMountPathID},
{Name: "devices", MountPath: "/dev"},
- {Name: k8sutil.ConfigOverrideName, ReadOnly: true, MountPath: tempEtcCephDir},
+ {Name: k8sutil.ConfigOverrideName, ReadOnly: true, MountPath: opconfig.EtcCephDir},
adminKeyringVolMount,
}
@@ -908,7 +883,7 @@ func (c *Cluster) getActivateOSDInitContainer(configDir, namespace, osdID string
Command: []string{
"/bin/bash",
"-c",
- fmt.Sprintf(activateOSDOnNodeCode, c.clusterInfo.FSID, osdInfo.UUID, osdStoreFlag, osdInfo.CVMode, blockPathVarName),
+ fmt.Sprintf(activateOSDOnNodeCode, osdInfo.UUID, osdStoreFlag, osdInfo.CVMode, blockPathVarName),
},
Name: "activate",
Image: c.spec.CephVersion.Image,
diff --git a/pkg/operator/ceph/cluster/osd/spec_test.go b/pkg/operator/ceph/cluster/osd/spec_test.go
index b4dbfc2d6507..5f5a9579be39 100644
--- a/pkg/operator/ceph/cluster/osd/spec_test.go
+++ b/pkg/operator/ceph/cluster/osd/spec_test.go
@@ -85,7 +85,7 @@ func testPodDevices(t *testing.T, dataDir, deviceName string, allDevices bool) {
clientset := fake.NewSimpleClientset()
clusterInfo := &cephclient.ClusterInfo{
Namespace: "ns",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo.SetName("test")
clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
@@ -463,7 +463,7 @@ func testPodDevices(t *testing.T, dataDir, deviceName string, allDevices bool) {
t.Run(("check hostpid and shareprocessnamespace"), func(t *testing.T) {
clusterInfo := &cephclient.ClusterInfo{
Namespace: "ns",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo.SetName("test")
clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
@@ -512,7 +512,7 @@ func TestStorageSpecConfig(t *testing.T) {
clientset := fake.NewSimpleClientset()
clusterInfo := &cephclient.ClusterInfo{
Namespace: "ns",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo.SetName("testing")
clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
@@ -595,7 +595,7 @@ func TestHostNetwork(t *testing.T) {
clientset := fake.NewSimpleClientset()
clusterInfo := &cephclient.ClusterInfo{
Namespace: "ns",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo.SetName("test")
@@ -756,7 +756,7 @@ func TestOSDPlacement(t *testing.T) {
clientset := fake.NewSimpleClientset()
clusterInfo := &cephclient.ClusterInfo{
Namespace: "ns",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo.SetName("testing")
clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
diff --git a/pkg/operator/ceph/cluster/osd/status_test.go b/pkg/operator/ceph/cluster/osd/status_test.go
index 533286fb4721..78a07ddbd34d 100644
--- a/pkg/operator/ceph/cluster/osd/status_test.go
+++ b/pkg/operator/ceph/cluster/osd/status_test.go
@@ -40,7 +40,7 @@ func TestOrchestrationStatus(t *testing.T) {
clientset := fake.NewSimpleClientset()
clusterInfo := &cephclient.ClusterInfo{
Namespace: "ns",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: &exectest.MockExecutor{}}
spec := cephv1.ClusterSpec{}
diff --git a/pkg/operator/ceph/cluster/osd/update_test.go b/pkg/operator/ceph/cluster/osd/update_test.go
index 134ba2dba9b7..47b1da7498b7 100644
--- a/pkg/operator/ceph/cluster/osd/update_test.go
+++ b/pkg/operator/ceph/cluster/osd/update_test.go
@@ -557,7 +557,7 @@ func Test_getOSDUpdateInfo(t *testing.T) {
}
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo.SetName("mycluster")
clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
@@ -589,7 +589,7 @@ func Test_getOSDUpdateInfo(t *testing.T) {
// osd.1 and 3 in another namespace (another Rook cluster)
clusterInfo2 := &cephclient.ClusterInfo{
Namespace: "other-namespace",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterInfo2.SetName("other-cluster")
clusterInfo2.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
diff --git a/pkg/operator/ceph/cluster/rbd/controller_test.go b/pkg/operator/ceph/cluster/rbd/controller_test.go
index 1bfbf3c4f3f4..b32a3d3bf7db 100644
--- a/pkg/operator/ceph/cluster/rbd/controller_test.go
+++ b/pkg/operator/ceph/cluster/rbd/controller_test.go
@@ -47,7 +47,7 @@ const (
dummyVersionsRaw = `
{
"mon": {
- "ceph version 17.2.1 (0000000000000000000000000000000000) quincy (stable)": 3
+ "ceph version 19.2.1 (0000000000000000000000000000000000) squid (stable)": 3
}
}`
)
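The `dummyVersionsRaw` fixtures above mirror `ceph versions` output, which the controllers decode into `cephv1.CephDaemonsVersions` (daemon counts keyed by the full version banner). A minimal sketch of that decoding, assuming the struct's usual `mon`/`overall` JSON tags; only the banner string needs to reference a supported release, the surrounding structure is unchanged:

```go
package main

import (
	"encoding/json"
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

func main() {
	raw := `{"mon": {"ceph version 19.2.1 (0000000000000000000000000000000000) squid (stable)": 3}}`

	// `ceph versions` maps each full version banner to the number of daemons running it.
	var versions cephv1.CephDaemonsVersions
	if err := json.Unmarshal([]byte(raw), &versions); err != nil {
		panic(err)
	}
	for banner, count := range versions.Mon {
		fmt.Printf("%d mon(s) running %q\n", count, banner)
	}
}
```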
diff --git a/pkg/operator/ceph/cluster/rbd/spec_test.go b/pkg/operator/ceph/cluster/rbd/spec_test.go
index d03596645f52..2be8c04b588d 100644
--- a/pkg/operator/ceph/cluster/rbd/spec_test.go
+++ b/pkg/operator/ceph/cluster/rbd/spec_test.go
@@ -76,7 +76,7 @@ func TestPodSpec(t *testing.T) {
TypeMeta: controllerTypeMeta,
}
clusterInfo := &cephclient.ClusterInfo{
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
s := scheme.Scheme
object := []runtime.Object{rbdMirror}
diff --git a/pkg/operator/ceph/cluster/version_test.go b/pkg/operator/ceph/cluster/version_test.go
index 8f4a2d65747b..23037c5dc537 100755
--- a/pkg/operator/ceph/cluster/version_test.go
+++ b/pkg/operator/ceph/cluster/version_test.go
@@ -37,7 +37,7 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) {
fakeRunningVersions := []byte(`
{
"mon": {
- "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 1,
+ "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 1,
"ceph version 18.1.0 (4be78cea2b4ae54a27b1049cffa1208df48bffae) reef (stable)": 2
}
}`)
@@ -53,7 +53,7 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) {
fakeRunningVersions = []byte(`
{
"overall": {
- "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 1,
+ "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 1,
"ceph version 18.1.0 (4be78cea2b4ae54a27b1049cffa1208df48bffae) reef (stable)": 2
}
}`)
@@ -69,7 +69,7 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) {
fakeRunningVersions = []byte(`
{
"overall": {
- "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2
+ "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 2
}
}`)
var dummyRunningVersions3 cephv1.CephDaemonsVersions
@@ -82,11 +82,11 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) {
assert.True(t, m)
// 4 test - spec version is higher than running cluster --> we upgrade
- fakeImageVersion = cephver.Quincy
+ fakeImageVersion = cephver.Squid
fakeRunningVersions = []byte(`
{
"overall": {
- "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2
+ "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 2
}
}`)
var dummyRunningVersions4 cephv1.CephDaemonsVersions
@@ -98,12 +98,12 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) {
assert.True(t, m)
// 5 test - spec version and running cluster versions are identical --> we upgrade
- fakeImageVersion = cephver.CephVersion{Major: 17, Minor: 2, Extra: 0,
+ fakeImageVersion = cephver.CephVersion{Major: 19, Minor: 2, Extra: 0,
CommitID: "3a54b2b6d167d4a2a19e003a705696d4fe619afc"}
fakeRunningVersions = []byte(`
{
"overall": {
- "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2
+ "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 2
}
}`)
var dummyRunningVersions5 cephv1.CephDaemonsVersions
@@ -115,12 +115,12 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) {
assert.False(t, m)
// 6 test - spec version and running cluster have different commit ID
- fakeImageVersion = cephver.CephVersion{Major: 17, Minor: 2, Extra: 0, Build: 139,
+ fakeImageVersion = cephver.CephVersion{Major: 19, Minor: 2, Extra: 0, Build: 139,
CommitID: "3a54b2b6d167d4a2a19e003a705696d4fe619afc"}
fakeRunningVersions = []byte(`
{
"overall": {
- "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2
+ "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 2
}
}`)
var dummyRunningVersions6 cephv1.CephDaemonsVersions
@@ -132,12 +132,12 @@ func TestDiffImageSpecAndClusterRunningVersion(t *testing.T) {
assert.True(t, m)
// 7 test - spec version and running cluster have same commit ID
- fakeImageVersion = cephver.CephVersion{Major: 17, Minor: 2, Extra: 0,
+ fakeImageVersion = cephver.CephVersion{Major: 19, Minor: 2, Extra: 0,
CommitID: "3a54b2b6d167d4a2a19e003a705696d4fe619afc"}
fakeRunningVersions = []byte(`
{
"overall": {
- "ceph version 17.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) quincy (stable)": 2
+ "ceph version 19.2.0 (3a54b2b6d167d4a2a19e003a705696d4fe619afc) squid (stable)": 2
}
}`)
var dummyRunningVersions7 cephv1.CephDaemonsVersions
@@ -154,16 +154,14 @@ func TestMinVersion(t *testing.T) {
c.Spec.CephVersion.AllowUnsupported = true
c.ClusterInfo = &client.ClusterInfo{Context: context.TODO()}
- // All versions less than 16.2.0 or invalid tag are invalid
- v := &cephver.CephVersion{Major: 16, Minor: 1, Extra: 999}
+ // All versions below 18.2.0, or with an invalid tag, are invalid
+ v := &cephver.CephVersion{Major: 18, Minor: 1, Extra: 999}
assert.Error(t, c.validateCephVersion(v))
- v = &cephver.CephVersion{Major: 15, Minor: 2, Extra: 11}
+ v = &cephver.CephVersion{Major: 16, Minor: 2, Extra: 11}
assert.Error(t, c.validateCephVersion(v))
- // All versions at least 17.2.0 are valid
- v = &cephver.CephVersion{Major: 17, Minor: 2}
- assert.NoError(t, c.validateCephVersion(v))
- v = &cephver.CephVersion{Major: 18}
+ // All versions at least 18.2.0 are valid
+ v = &cephver.CephVersion{Major: 18, Minor: 2}
assert.NoError(t, c.validateCephVersion(v))
v = &cephver.CephVersion{Major: 19}
assert.NoError(t, c.validateCephVersion(v))
@@ -174,13 +172,9 @@ func TestSupportedVersion(t *testing.T) {
c.ClusterInfo = &client.ClusterInfo{Context: context.TODO()}
// lower version is not supported
- v := &cephver.CephVersion{Major: 16, Minor: 2, Extra: 7}
+ v := &cephver.CephVersion{Major: 17, Minor: 2, Extra: 7}
assert.Error(t, c.validateCephVersion(v))
- // Quincy is supported
- v = &cephver.CephVersion{Major: 17, Minor: 2, Extra: 0}
- assert.NoError(t, c.validateCephVersion(v))
-
// Reef is supported
v = &cephver.CephVersion{Major: 18, Minor: 2, Extra: 0}
assert.NoError(t, c.validateCephVersion(v))
diff --git a/pkg/operator/ceph/config/defaults.go b/pkg/operator/ceph/config/defaults.go
index d659d52e7f1e..8cf6f239153d 100644
--- a/pkg/operator/ceph/config/defaults.go
+++ b/pkg/operator/ceph/config/defaults.go
@@ -59,11 +59,6 @@ func DefaultCentralizedConfigs(cephVersion version.CephVersion) map[string]strin
"mon allow pool size one": "true",
}
- // Every release before Quincy will enable PG auto repair on Bluestore OSDs
- if !cephVersion.IsAtLeastQuincy() {
- overrides["osd scrub auto repair"] = "true"
- }
-
return overrides
}
diff --git a/pkg/operator/ceph/config/monstore.go b/pkg/operator/ceph/config/monstore.go
index 01c4eac44157..ac2e9d39e508 100644
--- a/pkg/operator/ceph/config/monstore.go
+++ b/pkg/operator/ceph/config/monstore.go
@@ -197,7 +197,7 @@ func (m *MonStore) DeleteAll(options ...Option) error {
// SetKeyValue sets an arbitrary key/value pair in Ceph's general purpose (as opposed to
// configuration-specific) key/value store. Keys and values can be any arbitrary string including
// spaces, underscores, dashes, and slashes.
-// See: https://docs.ceph.com/en/quincy/man/8/ceph/#config-key
+// See: https://docs.ceph.com/en/latest/man/8/ceph/#config-key
func (m *MonStore) SetKeyValue(key, value string) error {
logger.Debugf("setting %q=%q option in the mon config-key store", key, value)
args := []string{"config-key", "set", key, value}
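For context on the updated doc link: `SetKeyValue` targets the mon `config-key` store (Ceph's general-purpose key/value store), not the daemon configuration options that `MonStore` manages elsewhere. A minimal usage sketch; the key and value are illustrative and not taken from this patch:

```go
package example

import (
	"github.com/pkg/errors"

	"github.com/rook/rook/pkg/clusterd"
	cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
	"github.com/rook/rook/pkg/operator/ceph/config"
)

// setMarker stores an arbitrary key/value pair via `ceph config-key set`.
// The key name here is purely illustrative.
func setMarker(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo) error {
	monStore := config.GetMonStore(ctx, clusterInfo)
	if err := monStore.SetKeyValue("rook/example/marker", "enabled"); err != nil {
		return errors.Wrap(err, "failed to set config-key entry")
	}
	return nil
}
```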
diff --git a/pkg/operator/ceph/controller/predicate_test.go b/pkg/operator/ceph/controller/predicate_test.go
index 63565f154543..e1158ebf02ba 100644
--- a/pkg/operator/ceph/controller/predicate_test.go
+++ b/pkg/operator/ceph/controller/predicate_test.go
@@ -98,18 +98,18 @@ func TestIsUpgrade(t *testing.T) {
assert.False(t, b)
// different value do something
- newLabel["ceph_version"] = "17.2.0-quincy"
+ newLabel["ceph_version"] = "19.2.0-squid"
b = isUpgrade(oldLabel, newLabel)
assert.True(t, b, fmt.Sprintf("%v,%v", oldLabel, newLabel))
// same value do nothing
- oldLabel["ceph_version"] = "17.2.0-quincy"
- newLabel["ceph_version"] = "17.2.0-quincy"
+ oldLabel["ceph_version"] = "19.2.0-squid"
+ newLabel["ceph_version"] = "19.2.0-squid"
b = isUpgrade(oldLabel, newLabel)
assert.False(t, b, fmt.Sprintf("%v,%v", oldLabel, newLabel))
// different value do something
- newLabel["ceph_version"] = "17.2.1-quincy"
+ newLabel["ceph_version"] = "19.2.1-squid"
b = isUpgrade(oldLabel, newLabel)
assert.True(t, b, fmt.Sprintf("%v,%v", oldLabel, newLabel))
}
diff --git a/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go b/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go
index aefc87897b89..92d8af82d108 100644
--- a/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go
+++ b/pkg/operator/ceph/disruption/clusterdisruption/osd_test.go
@@ -44,7 +44,7 @@ import (
const (
healthyCephStatus = `{"fsid":"877a47e0-7f6c-435e-891a-76983ab8c509","health":{"checks":{},"status":"HEALTH_OK"},"election_epoch":12,"quorum":[0,1,2],"quorum_names":["a","b","c"],"monmap":{"epoch":3,"fsid":"877a47e0-7f6c-435e-891a-76983ab8c509","modified":"2020-11-02 09:58:23.015313","created":"2020-11-02 09:57:37.719235","min_mon_release":14,"min_mon_release_name":"nautilus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.74.42:3300","nonce":0},{"type":"v1","addr":"172.30.74.42:6789","nonce":0}]},"addr":"172.30.74.42:6789/0","public_addr":"172.30.74.42:6789/0"},{"rank":1,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.101.61:3300","nonce":0},{"type":"v1","addr":"172.30.101.61:6789","nonce":0}]},"addr":"172.30.101.61:6789/0","public_addr":"172.30.101.61:6789/0"},{"rank":2,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.250.55:3300","nonce":0},{"type":"v1","addr":"172.30.250.55:6789","nonce":0}]},"addr":"172.30.250.55:6789/0","public_addr":"172.30.250.55:6789/0"}]},"osdmap":{"osdmap":{"epoch":19,"num_osds":3,"num_up_osds":3,"num_in_osds":3,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":96}],"num_pgs":96,"num_pools":3,"num_objects":79,"data_bytes":81553681,"bytes_used":3255447552,"bytes_avail":1646011994112,"bytes_total":1649267441664,"read_bytes_sec":853,"write_bytes_sec":5118,"read_op_per_sec":1,"write_op_per_sec":0},"fsmap":{"epoch":9,"id":1,"up":1,"in":1,"max":1,"by_rank":[{"filesystem_id":1,"rank":0,"name":"ocs-storagecluster-cephfilesystem-b","status":"up:active","gid":14161},{"filesystem_id":1,"rank":0,"name":"ocs-storagecluster-cephfilesystem-a","status":"up:standby-replay","gid":24146}],"up:standby":0},"mgrmap":{"epoch":10,"active_gid":14122,"active_name":"a","active_addrs":{"addrvec":[{"type":"v2","addr":"10.131.0.28:6800","nonce":1},{"type":"v1","addr":"10.131.0.28:6801","nonce":1}]}}}`
unHealthyCephStatus = `{"fsid":"613975f3-3025-4802-9de1-a2280b950e75","health":{"checks":{"OSD_DOWN":{"severity":"HEALTH_WARN","summary":{"message":"1 osds down"}},"OSD_HOST_DOWN":{"severity":"HEALTH_WARN","summary":{"message":"1 host (1 osds) down"}},"PG_AVAILABILITY":{"severity":"HEALTH_WARN","summary":{"message":"Reduced data availability: 101 pgs stale"}},"POOL_APP_NOT_ENABLED":{"severity":"HEALTH_WARN","summary":{"message":"application not enabled on 1 pool(s)"}}},"status":"HEALTH_WARN","overall_status":"HEALTH_WARN"},"election_epoch":12,"quorum":[0,1,2],"quorum_names":["rook-ceph-mon0","rook-ceph-mon2","rook-ceph-mon1"],"monmap":{"epoch":3,"fsid":"613975f3-3025-4802-9de1-a2280b950e75","modified":"2017-08-11 20:13:02.075679","created":"2017-08-11 20:12:35.314510","features":{"persistent":["kraken","luminous"],"optional":[]},"mons":[{"rank":0,"name":"rook-ceph-mon0","addr":"10.3.0.45:6789/0","public_addr":"10.3.0.45:6789/0"},{"rank":1,"name":"rook-ceph-mon2","addr":"10.3.0.249:6789/0","public_addr":"10.3.0.249:6789/0"},{"rank":2,"name":"rook-ceph-mon1","addr":"10.3.0.252:6789/0","public_addr":"10.3.0.252:6789/0"}]},"osdmap":{"osdmap":{"epoch":17,"num_osds":2,"num_up_osds":1,"num_in_osds":2,"full":false,"nearfull":true,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"stale+active+clean","count":101},{"state_name":"active+clean","count":99}],"num_pgs":200,"num_pools":2,"num_objects":243,"data_bytes":976793635,"bytes_used":13611479040,"bytes_avail":19825307648,"bytes_total":33436786688},"fsmap":{"epoch":1,"by_rank":[]},"mgrmap":{"epoch":3,"active_gid":14111,"active_name":"rook-ceph-mgr0","active_addr":"10.2.73.6:6800/9","available":true,"standbys":[],"modules":["restful","status"],"available_modules":["dashboard","prometheus","restful","status","zabbix"]},"servicemap":{"epoch":1,"modified":"0.000000","services":{}}}`
- healthyCephStatusRemapped = `{"fsid":"e32d91a2-24ff-4953-bc4a-6864d31dd2a0","health":{"status":"HEALTH_OK","checks":{},"mutes":[]},"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_age":1177701,"monmap":{"epoch":1,"min_mon_release_name":"quincy","num_mons":1},"osdmap":{"epoch":1800,"num_osds":5,"num_up_osds":5,"osd_up_since":1699834324,"num_in_osds":5,"osd_in_since":1699834304,"num_remapped_pgs":11},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":174},{"state_name":"active+remapped+backfilling","count":10},{"state_name":"active+clean+remapped","count":1}],"num_pgs":185,"num_pools":9,"num_objects":2383,"data_bytes":2222656224,"bytes_used":8793104384,"bytes_avail":18050441216,"bytes_total":26843545600,"misplaced_objects":139,"misplaced_total":7149,"misplaced_ratio":0.019443278780248985,"recovering_objects_per_sec":10,"recovering_bytes_per_sec":9739877,"recovering_keys_per_sec":0,"num_objects_recovered":62,"num_bytes_recovered":58471087,"num_keys_recovered":0,"write_bytes_sec":2982994,"read_op_per_sec":0,"write_op_per_sec":26},"fsmap":{"epoch":1,"by_rank":[],"up:standby":0},"mgrmap":{"available":true,"num_standbys":0,"modules":["iostat","nfs","prometheus","restful"],"services":{"prometheus":"http://10.244.0.36:9283/"}},"servicemap":{"epoch":1,"modified":"0.000000","services":{}},"progress_events":{}}`
+ healthyCephStatusRemapped = `{"fsid":"e32d91a2-24ff-4953-bc4a-6864d31dd2a0","health":{"status":"HEALTH_OK","checks":{},"mutes":[]},"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_age":1177701,"monmap":{"epoch":1,"min_mon_release_name":"reef","num_mons":1},"osdmap":{"epoch":1800,"num_osds":5,"num_up_osds":5,"osd_up_since":1699834324,"num_in_osds":5,"osd_in_since":1699834304,"num_remapped_pgs":11},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":174},{"state_name":"active+remapped+backfilling","count":10},{"state_name":"active+clean+remapped","count":1}],"num_pgs":185,"num_pools":9,"num_objects":2383,"data_bytes":2222656224,"bytes_used":8793104384,"bytes_avail":18050441216,"bytes_total":26843545600,"misplaced_objects":139,"misplaced_total":7149,"misplaced_ratio":0.019443278780248985,"recovering_objects_per_sec":10,"recovering_bytes_per_sec":9739877,"recovering_keys_per_sec":0,"num_objects_recovered":62,"num_bytes_recovered":58471087,"num_keys_recovered":0,"write_bytes_sec":2982994,"read_op_per_sec":0,"write_op_per_sec":26},"fsmap":{"epoch":1,"by_rank":[],"up:standby":0},"mgrmap":{"available":true,"num_standbys":0,"modules":["iostat","nfs","prometheus","restful"],"services":{"prometheus":"http://10.244.0.36:9283/"}},"servicemap":{"epoch":1,"modified":"0.000000","services":{}},"progress_events":{}}`
)
var nodeName = "node01"
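The only change to `healthyCephStatusRemapped` is the `min_mon_release_name` field, which `ceph status` reports by release name and therefore has to reference a still-supported release. A small sketch decoding just that field, using an anonymous struct so it makes no assumptions about Rook's own status types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Trimmed-down stand-in for the fixture above.
	statusJSON := `{"monmap":{"min_mon_release_name":"reef","num_mons":1}}`

	var status struct {
		MonMap struct {
			MinMonReleaseName string `json:"min_mon_release_name"`
		} `json:"monmap"`
	}
	if err := json.Unmarshal([]byte(statusJSON), &status); err != nil {
		panic(err)
	}
	fmt.Println(status.MonMap.MinMonReleaseName) // reef
}
```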
diff --git a/pkg/operator/ceph/file/controller_test.go b/pkg/operator/ceph/file/controller_test.go
index cf7f67d82816..293d370abac6 100644
--- a/pkg/operator/ceph/file/controller_test.go
+++ b/pkg/operator/ceph/file/controller_test.go
@@ -142,7 +142,7 @@ const (
dummyVersionsRaw = `
{
"mon": {
- "ceph version 17.2.1 (000000000000000000000000000000) quincy (stable)": 3
+ "ceph version 19.2.1 (000000000000000000000000000000) squid (stable)": 3
}
}`
)
diff --git a/pkg/operator/ceph/file/filesystem_test.go b/pkg/operator/ceph/file/filesystem_test.go
index 856add3885eb..75f33ade9f32 100644
--- a/pkg/operator/ceph/file/filesystem_test.go
+++ b/pkg/operator/ceph/file/filesystem_test.go
@@ -224,7 +224,7 @@ func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool, createData
versionStr, _ := json.Marshal(
map[string]map[string]int{
"mds": {
- "ceph version 17.0.0-0-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) quincy (stable)": 2,
+ "ceph version 19.0.0-0-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) squid (stable)": 2,
},
})
return string(versionStr), nil
@@ -307,7 +307,7 @@ func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool, createData
versionStr, _ := json.Marshal(
map[string]map[string]int{
"mds": {
- "ceph version 17.2.0-0-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) quincy (stable)": 2,
+ "ceph version 19.2.0-0-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) squid (stable)": 2,
},
})
return string(versionStr), nil
@@ -361,7 +361,7 @@ func TestCreateFilesystem(t *testing.T) {
ConfigDir: configDir,
Clientset: clientset}
fs := fsTest(fsName)
- clusterInfo := &cephclient.ClusterInfo{FSID: "myfsid", CephVersion: version.Quincy, Context: ctx}
+ clusterInfo := &cephclient.ClusterInfo{FSID: "myfsid", CephVersion: version.Squid, Context: ctx}
ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef()
t.Run("start basic filesystem", func(t *testing.T) {
@@ -405,7 +405,7 @@ func TestCreateFilesystem(t *testing.T) {
})
t.Run("multi filesystem creation should succeed", func(t *testing.T) {
- clusterInfo.CephVersion = version.Quincy
+ clusterInfo.CephVersion = version.Squid
err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/")
assert.NoError(t, err)
})
@@ -427,7 +427,7 @@ func TestUpgradeFilesystem(t *testing.T) {
ConfigDir: configDir,
Clientset: clientset}
fs := fsTest(fsName)
- clusterInfo := &cephclient.ClusterInfo{FSID: "myfsid", CephVersion: version.Quincy, Context: ctx}
+ clusterInfo := &cephclient.ClusterInfo{FSID: "myfsid", CephVersion: version.Squid, Context: ctx}
// start a basic cluster for upgrade
ownerInfo := cephclient.NewMinimumOwnerInfoWithOwnerRef()
@@ -438,7 +438,7 @@ func TestUpgradeFilesystem(t *testing.T) {
testopk8s.ClearDeploymentsUpdated(deploymentsUpdated)
// do upgrade
- clusterInfo.CephVersion = version.Quincy
+ clusterInfo.CephVersion = version.Squid
context = &clusterd.Context{
Executor: executor,
ConfigDir: configDir,
@@ -473,56 +473,81 @@ func TestUpgradeFilesystem(t *testing.T) {
},
}
createdFsResponse, _ := json.Marshal(mdsmap)
- firstGet := false
+
+ // actual version
+ clusterInfo.CephVersion = version.Squid
+ // mocked running version reported by "ceph versions", intentionally older than the actual version above so the test exercises the upgrade error path
+ mockedVersionStr, _ := json.Marshal(
+ map[string]map[string]int{
+ "mds": {
+ "ceph version 18.2.0-0-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) reef (stable)": 2,
+ },
+ })
executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
- if contains(args, "fs") && contains(args, "get") {
- if firstGet {
- firstGet = false
- return "", errors.New("fs doesn't exist")
+ if contains(args, "fs") {
+ if contains(args, "get") {
+ return string(createdFsResponse), nil
+ } else if contains(args, "ls") {
+ return "[]", nil
+ } else if contains(args, "dump") {
+ return `{"standbys":[], "filesystems":[]}`, nil
+ } else if contains(args, "subvolumegroup") {
+ return "[]", nil
+ }
+ }
+ if contains(args, "osd") {
+ if contains(args, "lspools") {
+ return "[]", nil
}
- return string(createdFsResponse), nil
- } else if contains(args, "fs") && contains(args, "ls") {
- return "[]", nil
- } else if contains(args, "fs") && contains(args, "dump") {
- return `{"standbys":[], "filesystems":[]}`, nil
- } else if contains(args, "osd") && contains(args, "lspools") {
- return "[]", nil
- } else if contains(args, "mds") && contains(args, "fail") {
+ if contains(args, "pool") && contains(args, "application") {
+ if contains(args, "get") {
+ return `{"":{}}`, nil
+ }
+ return "[]", nil
+ }
+ if reflect.DeepEqual(args[1:3], []string{"pool", "get"}) {
+ return "", errors.New("test pool does not exist yet")
+ }
+ }
+ if contains(args, "mds") && contains(args, "fail") {
return "", errors.New("fail mds failed")
- } else if isBasePoolOperation(fsName, command, args) {
- return "", nil
- } else if reflect.DeepEqual(args[0:5], []string{"fs", "new", fsName, fsName + "-metadata", fsName + "-data0"}) {
- return "", nil
- } else if contains(args, "auth") && contains(args, "get-or-create-key") {
- return "{\"key\":\"mysecurekey\"}", nil
- } else if contains(args, "auth") && contains(args, "del") {
- return "", nil
- } else if contains(args, "config") && contains(args, "mds_cache_memory_limit") {
- return "", nil
- } else if contains(args, "set") && contains(args, "max_mds") {
- return "", nil
- } else if contains(args, "set") && contains(args, "allow_standby_replay") {
+ }
+ if isBasePoolOperation(fsName, command, args) {
return "", nil
- } else if contains(args, "config") && contains(args, "mds_join_fs") {
+ }
+ if reflect.DeepEqual(args[0:5], []string{"fs", "new", fsName, fsName + "-metadata", fsName + "-data0"}) {
return "", nil
- } else if contains(args, "config") && contains(args, "get") {
- return "{}", nil
- } else if reflect.DeepEqual(args[0:3], []string{"osd", "pool", "get"}) {
- return "", errors.New("test pool does not exist yet")
- } else if contains(args, "versions") {
- versionStr, _ := json.Marshal(
- map[string]map[string]int{
- "mds": {
- "ceph version 17.2.0-0-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) quincy (stable)": 2,
- },
- })
- return string(versionStr), nil
+ }
+ if contains(args, "auth") {
+ if contains(args, "get-or-create-key") {
+ return "{\"key\":\"mysecurekey\"}", nil
+ } else if contains(args, "auth") && contains(args, "del") {
+ return "", nil
+ }
+ }
+ if contains(args, "config") {
+ if contains(args, "mds_cache_memory_limit") {
+ return "", nil
+ } else if contains(args, "mds_join_fs") {
+ return "", nil
+ } else if contains(args, "get") {
+ return "{}", nil
+ }
+ }
+ if contains(args, "set") {
+ if contains(args, "max_mds") {
+ return "", nil
+ } else if contains(args, "allow_standby_replay") {
+ return "", nil
+ }
+ }
+ if contains(args, "versions") {
+ return string(mockedVersionStr), nil
}
assert.Fail(t, fmt.Sprintf("Unexpected command %q %q", command, args))
return "", nil
}
// do upgrade
- clusterInfo.CephVersion = version.Reef
context = &clusterd.Context{
Executor: executor,
ConfigDir: configDir,
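The rewritten executor mock follows the pattern used throughout these tests: route on the ceph subcommand and return canned JSON. A trimmed, self-contained sketch of that wiring, using only the `exectest.MockExecutor` surface already exercised above:

```go
package example

import (
	"encoding/json"

	exectest "github.com/rook/rook/pkg/util/exec/test"
)

// newVersionsMock returns an executor whose `ceph versions` output reports an
// older (Reef) mds than the cluster's actual version, mirroring the upgrade
// scenario in TestUpgradeFilesystem. All other commands succeed with empty output.
func newVersionsMock() *exectest.MockExecutor {
	mockedVersions, _ := json.Marshal(map[string]map[string]int{
		"mds": {"ceph version 18.2.0-0-g2f728b9 (2f728b952cf293dd7f809ad8a0f5b5d040c43010) reef (stable)": 2},
	})
	return &exectest.MockExecutor{
		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
			if len(args) > 0 && args[0] == "versions" {
				return string(mockedVersions), nil
			}
			return "", nil
		},
	}
}
```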
diff --git a/pkg/operator/ceph/file/mds/spec_test.go b/pkg/operator/ceph/file/mds/spec_test.go
index 803c3b6c019e..f41b077bfe26 100644
--- a/pkg/operator/ceph/file/mds/spec_test.go
+++ b/pkg/operator/ceph/file/mds/spec_test.go
@@ -60,7 +60,7 @@ func testDeploymentObject(t *testing.T, network cephv1.NetworkSpec) (*apps.Deplo
}
clusterInfo := &cephclient.ClusterInfo{
FSID: "myfsid",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clientset := testop.New(t, 1)
diff --git a/pkg/operator/ceph/file/mirror/controller_test.go b/pkg/operator/ceph/file/mirror/controller_test.go
index 1edcd6253ea1..e73f83d0762d 100644
--- a/pkg/operator/ceph/file/mirror/controller_test.go
+++ b/pkg/operator/ceph/file/mirror/controller_test.go
@@ -228,7 +228,7 @@ func TestCephFilesystemMirrorController(t *testing.T) {
}
currentAndDesiredCephVersion = func(ctx context.Context, rookImage string, namespace string, jobName string, ownerInfo *k8sutil.OwnerInfo, context *clusterd.Context, cephClusterSpec *cephv1.ClusterSpec, clusterInfo *client.ClusterInfo) (*version.CephVersion, *version.CephVersion, error) {
- return &version.Quincy, &version.Reef, nil
+ return &version.Squid, &version.Reef, nil
}
res, err := r.Reconcile(ctx, req)
diff --git a/pkg/operator/ceph/file/mirror/spec_test.go b/pkg/operator/ceph/file/mirror/spec_test.go
index 256705fab3c9..9d35d81b2b4e 100644
--- a/pkg/operator/ceph/file/mirror/spec_test.go
+++ b/pkg/operator/ceph/file/mirror/spec_test.go
@@ -74,7 +74,7 @@ func TestPodSpec(t *testing.T) {
TypeMeta: controllerTypeMeta,
}
clusterInfo := &cephclient.ClusterInfo{
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
s := scheme.Scheme
object := []runtime.Object{fsMirror}
diff --git a/pkg/operator/ceph/nfs/controller_test.go b/pkg/operator/ceph/nfs/controller_test.go
index f03292ec063b..34bd23b76dc3 100644
--- a/pkg/operator/ceph/nfs/controller_test.go
+++ b/pkg/operator/ceph/nfs/controller_test.go
@@ -218,7 +218,7 @@ func TestCephNFSController(t *testing.T) {
}
currentAndDesiredCephVersion = func(ctx context.Context, rookImage string, namespace string, jobName string, ownerInfo *k8sutil.OwnerInfo, context *clusterd.Context, cephClusterSpec *cephv1.ClusterSpec, clusterInfo *cephclient.ClusterInfo) (*version.CephVersion, *version.CephVersion, error) {
- return &version.Quincy, &version.Quincy, nil
+ return &version.Squid, &version.Squid, nil
}
t.Run("error - no ceph cluster", func(t *testing.T) {
diff --git a/pkg/operator/ceph/nfs/nfs_test.go b/pkg/operator/ceph/nfs/nfs_test.go
index f8a812a507a2..be90a8ab99ba 100644
--- a/pkg/operator/ceph/nfs/nfs_test.go
+++ b/pkg/operator/ceph/nfs/nfs_test.go
@@ -49,7 +49,7 @@ func TestReconcileCephNFS_createConfigMap(t *testing.T) {
context: c,
clusterInfo: &cephclient.ClusterInfo{
FSID: "myfsid",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
},
cephClusterSpec: &cephv1.ClusterSpec{
CephVersion: cephv1.CephVersionSpec{
@@ -154,7 +154,7 @@ func TestReconcileCephNFS_upCephNFS(t *testing.T) {
context: c,
clusterInfo: &cephclient.ClusterInfo{
FSID: "myfsid",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
Context: context.TODO(),
Namespace: ns,
},
diff --git a/pkg/operator/ceph/nfs/security_test.go b/pkg/operator/ceph/nfs/security_test.go
index 681980598a88..d03853a5ed6c 100644
--- a/pkg/operator/ceph/nfs/security_test.go
+++ b/pkg/operator/ceph/nfs/security_test.go
@@ -47,11 +47,11 @@ func mockReconcile() *ReconcileCephNFS {
return &ReconcileCephNFS{
clusterInfo: &cephclient.ClusterInfo{
FSID: "myfsid",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
},
cephClusterSpec: &cephv1.ClusterSpec{
CephVersion: cephv1.CephVersionSpec{
- Image: "quay.io/ceph/ceph:v17",
+ Image: "quay.io/ceph/ceph:v19",
},
},
}
@@ -191,7 +191,7 @@ func TestReconcileCephNFS_addSecurityConfigsToPod(t *testing.T) {
nss := containerByName(pod.InitContainers, "generate-nsswitch-conf")
assert.NotEmpty(t, nss)
// container should have CLUSTER image and resources from SERVER spec
- assert.Equal(t, "quay.io/ceph/ceph:v17", nss.Image)
+ assert.Equal(t, "quay.io/ceph/ceph:v19", nss.Image)
nssTester := optest.NewContainersSpecTester(t, []v1.Container{nss})
nssTester.AssertResourceSpec(optest.ResourceLimitExpectations{
CPUResourceLimit: "3000",
@@ -308,7 +308,7 @@ func TestReconcileCephNFS_addSecurityConfigsToPod(t *testing.T) {
nss := containerByName(pod.InitContainers, "generate-nsswitch-conf")
assert.NotEmpty(t, nss)
// container should have CLUSTER image and resources from SERVER spec
- assert.Equal(t, "quay.io/ceph/ceph:v17", nss.Image)
+ assert.Equal(t, "quay.io/ceph/ceph:v19", nss.Image)
nssTester := optest.NewContainersSpecTester(t, []v1.Container{nss})
nssTester.AssertResourceSpec(optest.ResourceLimitExpectations{
CPUResourceLimit: "3000",
diff --git a/pkg/operator/ceph/nfs/spec_test.go b/pkg/operator/ceph/nfs/spec_test.go
index 870321581a39..87ec566a50dc 100644
--- a/pkg/operator/ceph/nfs/spec_test.go
+++ b/pkg/operator/ceph/nfs/spec_test.go
@@ -71,11 +71,11 @@ func newDeploymentSpecTest(t *testing.T) (*ReconcileCephNFS, daemonConfig) {
context: c,
clusterInfo: &cephclient.ClusterInfo{
FSID: "myfsid",
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
},
cephClusterSpec: &cephv1.ClusterSpec{
CephVersion: cephv1.CephVersionSpec{
- Image: "quay.io/ceph/ceph:v17",
+ Image: "quay.io/ceph/ceph:v19",
},
},
}
diff --git a/pkg/operator/ceph/object/config.go b/pkg/operator/ceph/object/config.go
index 31082d7bf676..cbfc3ce99491 100644
--- a/pkg/operator/ceph/object/config.go
+++ b/pkg/operator/ceph/object/config.go
@@ -213,7 +213,7 @@ func (c *clusterConfig) setFlagsMonConfigStore(rgwConfig *rgwConfig) error {
// swift in the resource, `admin` is required for the operator to
// work, `swift_auth` is required to access swift without keystone
// – not sure about the additional APIs
- // https://docs.ceph.com/en/quincy/radosgw/config-ref/#confval-rgw_enable_apis
+ // https://docs.ceph.com/en/latest/radosgw/config-ref/#confval-rgw_enable_apis
// see also https://docs.ceph.com/en/octopus/radosgw/config-ref/#swift-settings on disabling s3
// when using '/' as prefix
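The doc-link fix above sits in `setFlagsMonConfigStore`, which persists RGW options such as `rgw_enable_apis` through the mon configuration store rather than `ceph.conf`. A hedged sketch of what that amounts to, assuming `MonStore.Set` keeps its `(who, option, value)` shape; the daemon name and API list are illustrative, not copied from the implementation:

```go
package example

import (
	"github.com/rook/rook/pkg/clusterd"
	cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
	"github.com/rook/rook/pkg/operator/ceph/config"
)

// enableRGWAPIs keeps s3, swift, swift_auth and admin enabled for one gateway.
// "client.rgw.my.store.a" is a placeholder daemon name for illustration.
func enableRGWAPIs(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo) error {
	monStore := config.GetMonStore(ctx, clusterInfo)
	return monStore.Set("client.rgw.my.store.a", "rgw_enable_apis", "s3, swift, swift_auth, admin")
}
```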
diff --git a/pkg/operator/ceph/object/config_test.go b/pkg/operator/ceph/object/config_test.go
index ce5f2eda57e6..88d3ad9d8a72 100644
--- a/pkg/operator/ceph/object/config_test.go
+++ b/pkg/operator/ceph/object/config_test.go
@@ -29,7 +29,7 @@ import (
func newConfig(t *testing.T) *clusterConfig {
clusterInfo := &cephclient.ClusterInfo{
- CephVersion: cephver.Quincy,
+ CephVersion: cephver.Squid,
}
clusterSpec := &cephv1.ClusterSpec{
Network: cephv1.NetworkSpec{
diff --git a/pkg/operator/ceph/object/controller_test.go b/pkg/operator/ceph/object/controller_test.go
index 1920a834b015..c6e1d2cda31e 100644
--- a/pkg/operator/ceph/object/controller_test.go
+++ b/pkg/operator/ceph/object/controller_test.go
@@ -149,7 +149,7 @@ const (
dummyVersionsRaw = `
{
"mon": {
- "ceph version 17.2.1 (0000000000000000) quincy (stable)": 3
+ "ceph version 19.2.1 (0000000000000000) squid (stable)": 3
}
}`
//nolint:gosec // only test values, not a real secret
@@ -1049,22 +1049,22 @@ func TestDiffVersions(t *testing.T) {
if args[0] == "versions" {
return `{
"mon": {
- "ceph version 17.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) quincy (dev)": 3
+ "ceph version 19.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) squid (dev)": 3
},
"mgr": {
- "ceph version 17.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) quincy (dev)": 1
+ "ceph version 19.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) squid (dev)": 1
},
"osd": {
- "ceph version 17.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) quincy (dev)": 3
+ "ceph version 19.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) squid (dev)": 3
},
"mds": {
- "ceph version 17.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) quincy (dev)": 2
+ "ceph version 19.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) squid (dev)": 2
},
"rgw": {
- "ceph version 17.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) quincy (dev)": 1
+ "ceph version 19.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) squid (dev)": 1
},
"overall": {
- "ceph version 17.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) quincy (dev)": 10
+ "ceph version 19.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) squid (dev)": 10
}
}`, nil
}
@@ -1074,7 +1074,7 @@ func TestDiffVersions(t *testing.T) {
c := &clusterd.Context{Executor: executor}
// desiredCephVersion comes from DetectCephVersion() (ceph --version) which uses ExtractCephVersion()
- desiredCephVersion, err := cephver.ExtractCephVersion("ceph version 17.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) quincy (dev)")
+ desiredCephVersion, err := cephver.ExtractCephVersion("ceph version 19.0.0-9718-g4ff72306 (4ff723061fc15c803dcf6556d02f56bdf56de5fa) squid (dev)")
assert.NoError(t, err)
// runningCephVersion comes from LeastUptodateDaemonVersion()
diff --git a/pkg/operator/ceph/object/objectstore.go b/pkg/operator/ceph/object/objectstore.go
index b34642938325..a851520feea0 100644
--- a/pkg/operator/ceph/object/objectstore.go
+++ b/pkg/operator/ceph/object/objectstore.go
@@ -31,8 +31,6 @@ import (
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
- "github.com/rook/rook/pkg/operator/ceph/config"
- cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/rook/rook/pkg/operator/k8sutil"
"github.com/rook/rook/pkg/util"
"github.com/rook/rook/pkg/util/exec"
@@ -73,8 +71,7 @@ var (
dataPoolName = "rgw.buckets.data"
// An user with system privileges for dashboard service
- DashboardUser = "dashboard-admin"
- rgwPgNumRemovedQuincyVersion = cephver.CephVersion{Major: 17, Minor: 2, Extra: 2}
+ DashboardUser = "dashboard-admin"
)
type idType struct {
@@ -760,20 +757,7 @@ func CreateObjectStorePools(context *Context, cluster *cephv1.ClusterSpec, metad
}
}
- // get the default PG count for rgw metadata pools
- var err error
- var metadataPoolPGs string
- if rgwRadosPGNumIsNew(context.clusterInfo.CephVersion) {
- metadataPoolPGs = rgwRadosPoolPgNum
- } else {
- metadataPoolPGs, err = config.GetMonStore(context.Context, context.clusterInfo).Get("mon.", "rgw_rados_pool_pg_num_min")
- }
- if err != nil {
- logger.Warningf("failed to adjust the PG count for rgw metadata pools. using the general default. %v", err)
- metadataPoolPGs = cephclient.DefaultPGCount
- }
-
- if err := createSimilarPools(context, append(metadataPools, rootPool), cluster, metadataPool, metadataPoolPGs); err != nil {
+ if err := createSimilarPools(context, append(metadataPools, rootPool), cluster, metadataPool, rgwRadosPoolPgNum); err != nil {
return errors.Wrap(err, "failed to create metadata pools")
}
@@ -1069,12 +1053,6 @@ func zoneUpdatePlacementWorkaround(objContext *Context, placementID string, expe
return nil
}
-// Check if this is a recent release of ceph where the legacy rgw_rados_pool_pg_num_min
-// is no longer available.
-func rgwRadosPGNumIsNew(cephVer cephver.CephVersion) bool {
- return cephVer.IsAtLeast(rgwPgNumRemovedQuincyVersion)
-}
-
// configurePoolsConcurrently checks if operator pod resources are set or not
func configurePoolsConcurrently() bool {
// if operator resources are specified return false as it will lead to operator pod killed due to resource limit
diff --git a/pkg/operator/ceph/object/objectstore_test.go b/pkg/operator/ceph/object/objectstore_test.go
index 7c0d16c851c4..4662349a53e5 100644
--- a/pkg/operator/ceph/object/objectstore_test.go
+++ b/pkg/operator/ceph/object/objectstore_test.go
@@ -548,12 +548,6 @@ func TestGetObjectBucketProvisioner(t *testing.T) {
}
-func TestRGWPGNumVersion(t *testing.T) {
- assert.False(t, rgwRadosPGNumIsNew(cephver.CephVersion{Major: 17, Minor: 2, Extra: 1}))
- assert.True(t, rgwRadosPGNumIsNew(cephver.CephVersion{Major: 17, Minor: 2, Extra: 2}))
- assert.True(t, rgwRadosPGNumIsNew(cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}))
-}
-
func TestCheckDashboardUser(t *testing.T) {
storeName := "myobject"
executor := &exectest.MockExecutor{
diff --git a/pkg/operator/ceph/object/spec.go b/pkg/operator/ceph/object/spec.go
index b72910fb9b55..83d338f5a5dc 100644
--- a/pkg/operator/ceph/object/spec.go
+++ b/pkg/operator/ceph/object/spec.go
@@ -32,7 +32,6 @@ import (
"github.com/rook/rook/pkg/daemon/ceph/osd/kms"
cephconfig "github.com/rook/rook/pkg/operator/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/controller"
- cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/rook/rook/pkg/operator/k8sutil"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
@@ -70,8 +69,6 @@ chown --recursive --verbose ceph:ceph $VAULT_TOKEN_NEW_PATH
)
var (
- cephVersionMinRGWSSES3 = cephver.CephVersion{Major: 17, Minor: 2, Extra: 3}
-
//go:embed rgw-probe.sh
rgwProbeScriptTemplate string
)
@@ -693,9 +690,6 @@ func (c *clusterConfig) CheckRGWKMS() (bool, error) {
func (c *clusterConfig) CheckRGWSSES3Enabled() (bool, error) {
if c.store.Spec.Security != nil && c.store.Spec.Security.ServerSideEncryptionS3.IsEnabled() {
- if !c.clusterInfo.CephVersion.IsAtLeast(cephVersionMinRGWSSES3) {
- return false, errors.New("minimum ceph quincy is required for AWS-SSE:S3")
- }
err := kms.ValidateConnectionDetails(c.clusterInfo.Context, c.context, &c.store.Spec.Security.ServerSideEncryptionS3, c.store.Namespace)
if err != nil {
return false, err
diff --git a/pkg/operator/ceph/object/spec_test.go b/pkg/operator/ceph/object/spec_test.go
index aeeaa368130e..f920f5f5a1cb 100644
--- a/pkg/operator/ceph/object/spec_test.go
+++ b/pkg/operator/ceph/object/spec_test.go
@@ -82,7 +82,7 @@ func TestPodSpecs(t *testing.T) {
}
store.Spec.Gateway.PriorityClassName = "my-priority-class"
info := clienttest.CreateTestClusterInfo(1)
- info.CephVersion = cephver.Quincy
+ info.CephVersion = cephver.Squid
data := cephconfig.NewStatelessDaemonDataPathMap(cephconfig.RgwType, "default", "rook-ceph", "/var/lib/rook/")
c := &clusterConfig{
@@ -150,7 +150,7 @@ func TestSSLPodSpec(t *testing.T) {
}
store.Spec.Gateway.PriorityClassName = "my-priority-class"
info := clienttest.CreateTestClusterInfo(1)
- info.CephVersion = cephver.Quincy
+ info.CephVersion = cephver.Squid
info.Namespace = store.Namespace
data := cephconfig.NewStatelessDaemonDataPathMap(cephconfig.RgwType, "default", "rook-ceph", "/var/lib/rook/")
store.Spec.Gateway.SecurePort = 443
@@ -161,7 +161,7 @@ func TestSSLPodSpec(t *testing.T) {
context: context,
rookVersion: "rook/rook:myversion",
clusterSpec: &cephv1.ClusterSpec{
- CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v17"},
+ CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v19"},
Network: cephv1.NetworkSpec{
HostNetwork: true,
},
@@ -560,7 +560,7 @@ func TestCheckRGWSSES3Enabled(t *testing.T) {
store: store,
clusterInfo: &client.ClusterInfo{Context: ctx, CephVersion: cephver.CephVersion{Major: 17, Minor: 2, Extra: 3}},
clusterSpec: &cephv1.ClusterSpec{
- CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v17"},
+ CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v19"},
DataDirHostPath: "/var/lib/rook",
},
}
@@ -758,7 +758,7 @@ func TestAWSServerSideEncryption(t *testing.T) {
context: context,
rookVersion: "rook/rook:myversion",
clusterSpec: &cephv1.ClusterSpec{
- CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v17.3"},
+ CephVersion: cephv1.CephVersionSpec{Image: "quay.io/ceph/ceph:v19.3"},
Network: cephv1.NetworkSpec{
HostNetwork: true,
},
@@ -966,12 +966,12 @@ func TestAddDNSNamesToRGWPodSpec(t *testing.T) {
{"dns name with zone with custom endpoints ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc,my.custom.endpoint1,my.custom.endpoint2", cephV18, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80"}, false},
{"dns name with zone with custom invalid endpoints ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "", cephV18, "myzone", []string{"http://my.custom.endpoint:80", "http://!my.invalid-custom.endpoint:80"}, true},
{"dns name with zone with mixed invalid and valid dnsnames/custom endpoint ceph v18", []string{"my.dns.name", "!my.dns.name"}, "", cephV18, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80:80"}, true},
- {"no dns names ceph v17", []string{}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, false},
- {"one dns name ceph v17", []string{"my.dns.name"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
- {"multiple dns names ceph v17", []string{"my.dns.name1", "my.dns.name2"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
- {"duplicate dns names ceph v17", []string{"my.dns.name1", "my.dns.name2", "my.dns.name2"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
- {"invalid dns name ceph v17", []string{"!my.invalid-dns.name"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
- {"mixed invalid and valid dns names ceph v17", []string{"my.dns.name", "!my.invalid-dns.name"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
+ {"no dns names ceph v19", []string{}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, false},
+ {"one dns name ceph v19", []string{"my.dns.name"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
+ {"multiple dns names ceph v19", []string{"my.dns.name1", "my.dns.name2"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
+ {"duplicate dns names ceph v19", []string{"my.dns.name1", "my.dns.name2", "my.dns.name2"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
+ {"invalid dns name ceph v19", []string{"!my.invalid-dns.name"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
+ {"mixed invalid and valid dns names ceph v19", []string{"my.dns.name", "!my.invalid-dns.name"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
{"no dns names ceph v19", []string{}, "", cephver.CephVersion{Major: 19, Minor: 0, Extra: 0}, "", []string{}, false},
{"no dns names with zone ceph v19", []string{}, "", cephver.CephVersion{Major: 19, Minor: 0, Extra: 0}, "myzone", []string{}, false},
{"no dns names with zone and custom endpoints ceph v19", []string{}, "", cephver.CephVersion{Major: 19, Minor: 0, Extra: 0}, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80"}, false},
diff --git a/pkg/operator/ceph/version/version.go b/pkg/operator/ceph/version/version.go
index 705258102da1..de0bbfb96ffb 100644
--- a/pkg/operator/ceph/version/version.go
+++ b/pkg/operator/ceph/version/version.go
@@ -40,10 +40,8 @@ const (
var (
// Minimum supported version
- Minimum = CephVersion{17, 2, 0, 0, ""}
+ Minimum = CephVersion{18, 2, 0, 0, ""}
- // Quincy Ceph version
- Quincy = CephVersion{17, 0, 0, 0, ""}
// Reef Ceph version
Reef = CephVersion{18, 0, 0, 0, ""}
// Squid ceph version
@@ -52,7 +50,7 @@ var (
Tentacle = CephVersion{20, 0, 0, 0, ""}
// supportedVersions are production-ready versions that rook supports
- supportedVersions = []CephVersion{Quincy, Reef, Squid}
+ supportedVersions = []CephVersion{Reef, Squid}
// unsupportedVersions are possibly Ceph pin-point release that introduced breaking changes and not recommended
unsupportedVersions []CephVersion
@@ -86,8 +84,6 @@ func (v *CephVersion) CephVersionFormatted() string {
// ReleaseName is the name of the Ceph release
func (v *CephVersion) ReleaseName() string {
switch v.Major {
- case Quincy.Major:
- return "quincy"
case Reef.Major:
return "reef"
case Squid.Major:
@@ -169,11 +165,6 @@ func (v *CephVersion) isExactly(other CephVersion) bool {
return v.Major == other.Major && v.Minor == other.Minor && v.Extra == other.Extra
}
-// IsQuincy checks if the Ceph version is Quincy
-func (v *CephVersion) IsQuincy() bool {
- return v.isRelease(Quincy)
-}
-
// IsReef checks if the Ceph version is Reef
func (v *CephVersion) IsReef() bool {
return v.isRelease(Reef)
@@ -227,11 +218,6 @@ func (v *CephVersion) IsAtLeastReef() bool {
return v.IsAtLeast(Reef)
}
-// IsAtLeastQuincy checks that the Ceph version is at least Quincy
-func (v *CephVersion) IsAtLeastQuincy() bool {
- return v.IsAtLeast(Quincy)
-}
-
// IsIdentical checks if Ceph versions are identical
func IsIdentical(a, b CephVersion) bool {
if a.Major == b.Major {
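With Quincy gone, `Minimum` moves to 18.2.0 and the `IsQuincy`/`IsAtLeastQuincy` helpers disappear; callers are left with `IsAtLeast`, the Reef/Squid release helpers, and `ExtractCephVersion` (seen elsewhere in this patch). A short sketch of checking a detected version against the new floor:

```go
package example

import (
	"fmt"

	cephver "github.com/rook/rook/pkg/operator/ceph/version"
)

// checkDetectedVersion parses a `ceph --version` banner and rejects anything
// below the new minimum (18.2.0, i.e. Reef).
func checkDetectedVersion(banner string) error {
	v, err := cephver.ExtractCephVersion(banner)
	if err != nil {
		return err
	}
	if !v.IsAtLeast(cephver.Minimum) {
		return fmt.Errorf("ceph %s (%s) is below the minimum supported version %s",
			v.String(), v.ReleaseName(), cephver.Minimum.String())
	}
	return nil
}
```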
diff --git a/pkg/operator/ceph/version/version_test.go b/pkg/operator/ceph/version/version_test.go
index 1a95a3f1cd9e..013bf986f93c 100644
--- a/pkg/operator/ceph/version/version_test.go
+++ b/pkg/operator/ceph/version/version_test.go
@@ -24,7 +24,7 @@ import (
)
func TestToString(t *testing.T) {
- assert.Equal(t, "17.0.0-0 quincy", Quincy.String())
+ assert.Equal(t, "19.0.0-0 squid", Squid.String())
received := CephVersion{-1, 0, 0, 0, ""}
expected := fmt.Sprintf("-1.0.0-0 %s", unknownVersionString)
@@ -32,11 +32,11 @@ func TestToString(t *testing.T) {
}
func TestCephVersionFormatted(t *testing.T) {
- assert.Equal(t, "ceph version 17.0.0-0 quincy", Quincy.CephVersionFormatted())
+ assert.Equal(t, "ceph version 19.0.0-0 squid", Squid.CephVersionFormatted())
}
func TestReleaseName(t *testing.T) {
- assert.Equal(t, "quincy", Quincy.ReleaseName())
+ assert.Equal(t, "squid", Squid.ReleaseName())
ver := CephVersion{-1, 0, 0, 0, ""}
assert.Equal(t, unknownVersionString, ver.ReleaseName())
}
@@ -79,7 +79,7 @@ ceph version 18.1.33-403-g7ba6bece41
v2d := `
bin/ceph --version
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
-ceph version Development (no_version) quincy (rc)
+ceph version Development (no_version) reef (rc)
`
v, err := ExtractCephVersion(v2c)
assert.Error(t, err)
@@ -105,23 +105,24 @@ func TestSupported(t *testing.T) {
}
func TestIsRelease(t *testing.T) {
- assert.True(t, Quincy.isRelease(Quincy))
assert.True(t, Reef.isRelease(Reef))
+ assert.True(t, Squid.isRelease(Squid))
- assert.False(t, Reef.isRelease(Quincy))
+ assert.False(t, Reef.isRelease(Squid))
- QuincyUpdate := Quincy
- QuincyUpdate.Minor = 33
- QuincyUpdate.Extra = 4
- assert.True(t, QuincyUpdate.isRelease(Quincy))
+ ReefUpdate := Reef
+ ReefUpdate.Minor = 33
+ ReefUpdate.Extra = 4
+ assert.True(t, ReefUpdate.isRelease(Reef))
}
func TestIsReleaseX(t *testing.T) {
- assert.False(t, Quincy.IsReef())
+ assert.False(t, Squid.IsReef())
}
func TestVersionAtLeast(t *testing.T) {
- assert.True(t, Quincy.IsAtLeast(Quincy))
+ assert.True(t, Squid.IsAtLeast(Squid))
+ assert.True(t, Squid.IsAtLeast(Reef))
assert.True(t, (&CephVersion{1, 0, 0, 0, ""}).IsAtLeast(CephVersion{0, 0, 0, 0, ""}))
assert.False(t, (&CephVersion{0, 0, 0, 0, ""}).IsAtLeast(CephVersion{1, 0, 0, 0, ""}))
@@ -133,8 +134,8 @@ func TestVersionAtLeast(t *testing.T) {
}
func TestVersionAtLeastX(t *testing.T) {
- assert.True(t, Quincy.IsAtLeastQuincy())
- assert.False(t, Quincy.IsAtLeastReef())
+ assert.True(t, Reef.IsAtLeastReef())
+ assert.False(t, Reef.IsAtLeastSquid())
}
func TestIsIdentical(t *testing.T) {
@@ -205,7 +206,7 @@ func TestCephVersion_Unsupported(t *testing.T) {
fields fields
want bool
}{
- {"quincy", fields{Major: 17, Minor: 2, Extra: 0, Build: 0}, false},
+ {"squid", fields{Major: 19, Minor: 2, Extra: 0, Build: 0}, false},
{"reef", fields{Major: 18, Minor: 2, Extra: 0, Build: 0}, false},
}
for _, tt := range tests {
diff --git a/tests/framework/installer/ceph_installer.go b/tests/framework/installer/ceph_installer.go
index 9d7a725b7fad..a02757fcd83d 100644
--- a/tests/framework/installer/ceph_installer.go
+++ b/tests/framework/installer/ceph_installer.go
@@ -43,13 +43,11 @@ import (
const (
// test with the latest releases
- quincyTestImage = "quay.io/ceph/ceph:v17"
- reefTestImage = "quay.io/ceph/ceph:v18"
- squidTestImage = "quay.io/ceph/ceph:v19"
+ reefTestImage = "quay.io/ceph/ceph:v18"
+ squidTestImage = "quay.io/ceph/ceph:v19"
// test with the current development versions
- quincyDevelTestImage = "quay.io/ceph/daemon-base:latest-quincy-devel"
- reefDevelTestImage = "quay.io/ceph/daemon-base:latest-reef-devel"
- squidDevelTestImage = "quay.io/ceph/daemon-base:latest-squid-devel"
+ reefDevelTestImage = "quay.io/ceph/daemon-base:latest-reef-devel"
+ squidDevelTestImage = "quay.io/ceph/daemon-base:latest-squid-devel"
// test with the latest Ceph main image
mainTestImage = "quay.io/ceph/daemon-base:latest-main-devel"
cephOperatorLabel = "app=rook-ceph-operator"
@@ -69,8 +67,6 @@ mon compact on start = true
)
var (
- QuincyVersion = cephv1.CephVersionSpec{Image: quincyTestImage}
- QuincyDevelVersion = cephv1.CephVersionSpec{Image: quincyDevelTestImage}
ReefVersion = cephv1.CephVersionSpec{Image: reefTestImage}
ReefDevelVersion = cephv1.CephVersionSpec{Image: reefDevelTestImage}
SquidVersion = cephv1.CephVersionSpec{Image: squidTestImage}
@@ -97,8 +93,6 @@ func ReturnCephVersion() cephv1.CephVersionSpec {
switch os.Getenv("CEPH_SUITE_VERSION") {
case "main":
return MainVersion
- case "quincy-devel":
- return QuincyDevelVersion
case "reef-devel":
return ReefDevelVersion
case "squid-devel":
diff --git a/tests/integration/ceph_auth_keystone_test.go b/tests/integration/ceph_auth_keystone_test.go
index cf4625b725ad..8d0de029cd06 100644
--- a/tests/integration/ceph_auth_keystone_test.go
+++ b/tests/integration/ceph_auth_keystone_test.go
@@ -93,7 +93,7 @@ func (h *KeystoneAuthSuite) SetupSuite() {
ChangeHostName: true,
ConnectionsEncrypted: true,
RookVersion: installer.LocalBuildTag,
- CephVersion: installer.QuincyVersion,
+ CephVersion: installer.ReefVersion,
SkipClusterCleanup: false,
SkipCleanupPolicy: false,
}
diff --git a/tests/integration/ceph_helm_test.go b/tests/integration/ceph_helm_test.go
index 884573f60f29..de16e4319973 100644
--- a/tests/integration/ceph_helm_test.go
+++ b/tests/integration/ceph_helm_test.go
@@ -70,7 +70,7 @@ func (h *HelmSuite) SetupSuite() {
ChangeHostName: true,
ConnectionsEncrypted: true,
RookVersion: installer.LocalBuildTag,
- CephVersion: installer.QuincyVersion,
+ CephVersion: installer.ReefVersion,
}
h.settings.ApplyEnvVars()
h.installer, h.k8shelper = StartTestCluster(h.T, h.settings)
diff --git a/tests/integration/ceph_multi_cluster_test.go b/tests/integration/ceph_multi_cluster_test.go
index 5e30ec6394ca..0f4e3c596d8d 100644
--- a/tests/integration/ceph_multi_cluster_test.go
+++ b/tests/integration/ceph_multi_cluster_test.go
@@ -81,7 +81,7 @@ func (s *MultiClusterDeploySuite) SetupSuite() {
Mons: 1,
MultipleMgrs: true,
RookVersion: installer.LocalBuildTag,
- CephVersion: installer.QuincyVersion,
+ CephVersion: installer.SquidVersion,
RequireMsgr2: true,
}
s.settings.ApplyEnvVars()
@@ -91,7 +91,7 @@ func (s *MultiClusterDeploySuite) SetupSuite() {
Namespace: "multi-external",
OperatorNamespace: s.settings.OperatorNamespace,
RookVersion: s.settings.RookVersion,
- CephVersion: installer.QuincyVersion,
+ CephVersion: installer.SquidVersion,
}
externalSettings.ApplyEnvVars()
s.externalManifests = installer.NewCephManifests(externalSettings)
diff --git a/tests/integration/ceph_upgrade_test.go b/tests/integration/ceph_upgrade_test.go
index a31184e959de..04ee2b4bb7dc 100644
--- a/tests/integration/ceph_upgrade_test.go
+++ b/tests/integration/ceph_upgrade_test.go
@@ -97,11 +97,11 @@ func (s *UpgradeSuite) baseSetup(useHelm bool, initialRookVersion string, initia
}
func (s *UpgradeSuite) TestUpgradeRook() {
- s.testUpgrade(false, installer.QuincyVersion)
+ s.testUpgrade(false, installer.ReefVersion)
}
func (s *UpgradeSuite) TestUpgradeHelm() {
- s.testUpgrade(true, installer.QuincyVersion)
+ s.testUpgrade(true, installer.ReefVersion)
}
func (s *UpgradeSuite) testUpgrade(useHelm bool, initialCephVersion v1.CephVersionSpec) {
@@ -158,17 +158,6 @@ func (s *UpgradeSuite) testUpgrade(useHelm bool, initialCephVersion v1.CephVersi
return
}
- //
- // Upgrade from quincy to reef
- //
- logger.Infof("*** UPGRADING CEPH FROM QUINCY TO REEF ***")
- s.gatherLogs(s.settings.OperatorNamespace, "_before_reef_upgrade")
- s.upgradeCephVersion(installer.ReefVersion.Image, numOSDs)
- // Verify reading and writing to the test clients
- newFile = "post-reef-upgrade-file"
- s.verifyFilesAfterUpgrade(newFile, rbdFilesToRead, cephfsFilesToRead)
- logger.Infof("Verified upgrade from quincy to reef")
-
//
// Upgrade from reef to squid
//
@@ -183,40 +172,6 @@ func (s *UpgradeSuite) testUpgrade(useHelm bool, initialCephVersion v1.CephVersi
checkCephObjectUser(&s.Suite, s.helper, s.k8sh, s.namespace, installer.ObjectStoreName, objectUserID, true, false)
}
-func (s *UpgradeSuite) TestUpgradeCephToQuincyDevel() {
- baseRookImage := installer.LocalBuildTag
- s.baseSetup(false, baseRookImage, installer.QuincyVersion)
-
- objectUserID := "upgraded-user"
- preFilename := "pre-upgrade-file"
- s.settings.CephVersion = installer.QuincyVersion
- numOSDs, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(baseRookImage, objectUserID, preFilename)
- clusterInfo := client.AdminTestClusterInfo(s.namespace)
- requireBlockImagesRemoved := false
- defer func() {
- blockTestDataCleanUp(s.helper, s.k8sh, &s.Suite, clusterInfo, installer.BlockPoolName, installer.BlockPoolSCName, blockName, rbdPodName, requireBlockImagesRemoved)
- cleanupFilesystemConsumer(s.helper, s.k8sh, &s.Suite, s.namespace, filePodName)
- cleanupFilesystem(s.helper, s.k8sh, &s.Suite, s.namespace, installer.FilesystemName)
- _ = s.helper.ObjectUserClient.Delete(s.namespace, objectUserID)
- _ = s.helper.BucketClient.DeleteObc(obcName, installer.ObjectStoreSCName, bucketPrefix, maxObject, false)
- _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreName, "Delete")
- objectStoreCleanUp(&s.Suite, s.helper, s.k8sh, s.settings.Namespace, installer.ObjectStoreName)
- }()
-
- //
- // Upgrade from quincy to quincy devel
- //
- logger.Infof("*** UPGRADING CEPH FROM QUINCY STABLE TO QUINCY DEVEL ***")
- s.gatherLogs(s.settings.OperatorNamespace, "_before_quincy_upgrade")
- s.upgradeCephVersion(installer.QuincyDevelVersion.Image, numOSDs)
- // Verify reading and writing to the test clients
- newFile := "post-quincy-upgrade-file"
- s.verifyFilesAfterUpgrade(newFile, rbdFilesToRead, cephfsFilesToRead)
- logger.Infof("Verified upgrade from quincy stable to quincy devel")
-
- checkCephObjectUser(&s.Suite, s.helper, s.k8sh, s.namespace, installer.ObjectStoreName, objectUserID, true, false)
-}
-
func (s *UpgradeSuite) TestUpgradeCephToReefDevel() {
baseRookImage := installer.LocalBuildTag
s.baseSetup(false, baseRookImage, installer.ReefVersion)