Skip to content

Commit

Permalink
Merge pull request rook#14772 from jhoblitt/bugfix/cephobjectstore-existing-pools
Browse files Browse the repository at this point in the history

rgw: fix CephObjectStore failing with pre-existing pools
  • Loading branch information
travisn authored Oct 3, 2024
2 parents fc2ac66 + 581fd5c commit 0499534
Show file tree
Hide file tree
Showing 4 changed files with 297 additions and 21 deletions.
72 changes: 67 additions & 5 deletions .github/workflows/canary-integration-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,9 @@ jobs:
tests/scripts/github-action-helper.sh create_partitions_for_osds
- name: deploy cluster
run: tests/scripts/github-action-helper.sh deploy_cluster
run: |
tests/scripts/github-action-helper.sh deploy_cluster
tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: setup csi-addons
run: tests/scripts/csiaddons.sh setup_csiaddons
Expand Down Expand Up @@ -364,6 +366,7 @@ jobs:
run: |
export ALLOW_LOOP_DEVICES=true
tests/scripts/github-action-helper.sh deploy_cluster loop
tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
tests/scripts/github-action-helper.sh create_operator_toolbox
- name: wait for prepare pod
Expand Down Expand Up @@ -433,7 +436,9 @@ jobs:
tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --wipe-only
- name: deploy cluster
run: tests/scripts/github-action-helper.sh deploy_cluster two_osds_in_device
run: |
tests/scripts/github-action-helper.sh deploy_cluster two_osds_in_device
tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 2
Expand Down Expand Up @@ -482,6 +487,7 @@ jobs:
- name: deploy cluster
run: |
tests/scripts/github-action-helper.sh deploy_cluster osd_with_metadata_partition_device
tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
Expand Down Expand Up @@ -537,7 +543,9 @@ jobs:
tests/scripts/github-action-helper.sh create_LV_on_disk $(sudo losetup --find --show test-rook.img)
- name: deploy cluster
run: tests/scripts/github-action-helper.sh deploy_cluster osd_with_metadata_device
run: |
tests/scripts/github-action-helper.sh deploy_cluster osd_with_metadata_device
tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
Expand Down Expand Up @@ -587,7 +595,9 @@ jobs:
tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --wipe-only
- name: deploy cluster
run: tests/scripts/github-action-helper.sh deploy_cluster encryption
run: |
tests/scripts/github-action-helper.sh deploy_cluster encryption
tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster encryption
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
Expand Down Expand Up @@ -642,7 +652,9 @@ jobs:
tests/scripts/github-action-helper.sh create_LV_on_disk $BLOCK
- name: deploy cluster
run: tests/scripts/github-action-helper.sh deploy_cluster lvm
run: |
tests/scripts/github-action-helper.sh deploy_cluster lvm
tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
Expand Down Expand Up @@ -1698,3 +1710,53 @@ jobs:
uses: ./.github/workflows/collect-logs
with:
name: ${{ github.job }}-${{ matrix.ceph-image }}

  # Canary job: verify a CephObjectStore comes up correctly when its RGW pools
  # are pre-created as CephBlockPool resources (regression coverage for the
  # "CephObjectStore failing with pre-existing pools" fix in rook#14772).
  object-with-cephblockpools:
    runs-on: ubuntu-22.04
    # Skip this job when the PR carries the 'skip-ci' label.
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    strategy:
      matrix:
        # One job instance per Ceph image supplied by the calling workflow.
        ceph-image: ${{ fromJson(inputs.ceph_images) }}
    steps:
      - name: checkout
        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
        with:
          # Full clone; helper scripts rely on git history being present.
          fetch-depth: 0

      - name: consider debugging
        uses: ./.github/workflows/tmate_debug
        with:
          use-tmate: ${{ secrets.USE_TMATE }}

      - name: setup cluster resources
        uses: ./.github/workflows/canary-test-config

      - name: set Ceph version in CephCluster manifest
        # NOTE(review): this step reads github.event.inputs.ceph-image, while the
        # job matrix is built from inputs.ceph_images and the log-collection step
        # below uses matrix.ceph-image — confirm whether this was meant to be
        # "${{ matrix.ceph-image }}".
        run: tests/scripts/github-action-helper.sh replace_ceph_image "deploy/examples/cluster-test.yaml" "${{ github.event.inputs.ceph-image }}"

      - name: validate-yaml
        run: tests/scripts/github-action-helper.sh validate_yaml

      - name: use local disk and create partitions for osds
        run: |
          tests/scripts/github-action-helper.sh use_local_disk
          tests/scripts/github-action-helper.sh create_partitions_for_osds
      - name: deploy cluster
        run: tests/scripts/github-action-helper.sh deploy_cluster

      - name: create CephBlockPool(s) and CephObjectStore
        # -eo pipefail fails the step on any command error; -x traces commands.
        shell: bash --noprofile --norc -eo pipefail -x {0}
        run: kubectl create -f deploy/examples/object-with-cephblockpools-test.yaml

      - name: wait for CephObjectStore to be ready
        run: tests/scripts/validate_cluster.sh rgw object-with-cephblockpools

      - name: check for pools created by RGW that are unexpected
        run: tests/scripts/github-action-helper.sh test_object_with_cephblockpools_extra_pools

      - name: collect common logs
        if: always()
        uses: ./.github/workflows/collect-logs
        with:
          name: ${{ github.job }}-${{ matrix.ceph-image }}
135 changes: 135 additions & 0 deletions deploy/examples/object-with-cephblockpools-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
##########################################################
# CephObjectStore backed by pre-created pools.
# The CephBlockPool documents below declare every pool the
# object store needs, so the store adopts them instead of
# creating its own.
# Usage: kubectl create -f object-with-cephblockpools-test.yaml
##########################################################
---
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: object-with-cephblockpools
  namespace: rook-ceph # namespace:cluster
spec:
  # Intentionally no metadataPool/dataPool: pools come from the
  # CephBlockPool resources declared in this file.
  gateway:
    port: 80
    instances: 1
---
# Shared RGW root pool. spec.name overrides the pool name because
# a Kubernetes metadata.name cannot begin with a dot.
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: rgw.root
  namespace: rook-ceph # namespace:cluster
spec:
  application: rgw
  failureDomain: osd
  name: .rgw.root
  parameters:
    pg_autoscale_mode: "off"
    pg_num: "1"
  replicated:
    requireSafeReplicaSize: false
    size: 1
---
# Per-store pools follow; size 1 / single PG is for CI only.
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: object-with-cephblockpools.rgw.control
  namespace: rook-ceph # namespace:cluster
spec:
  application: rgw
  failureDomain: osd
  parameters:
    pg_autoscale_mode: "off"
    pg_num: "1"
  replicated:
    requireSafeReplicaSize: false
    size: 1
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: object-with-cephblockpools.rgw.meta
  namespace: rook-ceph # namespace:cluster
spec:
  application: rgw
  failureDomain: osd
  parameters:
    pg_autoscale_mode: "off"
    pg_num: "1"
  replicated:
    requireSafeReplicaSize: false
    size: 1
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: object-with-cephblockpools.rgw.log
  namespace: rook-ceph # namespace:cluster
spec:
  application: rgw
  failureDomain: osd
  parameters:
    pg_autoscale_mode: "off"
    pg_num: "1"
  replicated:
    requireSafeReplicaSize: false
    size: 1
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: object-with-cephblockpools.rgw.buckets.index
  namespace: rook-ceph # namespace:cluster
spec:
  application: rgw
  failureDomain: osd
  parameters:
    pg_autoscale_mode: "off"
    pg_num: "1"
  replicated:
    requireSafeReplicaSize: false
    size: 1
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: object-with-cephblockpools.rgw.buckets.non-ec
  namespace: rook-ceph # namespace:cluster
spec:
  application: rgw
  failureDomain: osd
  parameters:
    pg_autoscale_mode: "off"
    pg_num: "1"
  replicated:
    requireSafeReplicaSize: false
    size: 1
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: object-with-cephblockpools.rgw.otp
  namespace: rook-ceph # namespace:cluster
spec:
  application: rgw
  failureDomain: osd
  parameters:
    pg_autoscale_mode: "off"
    pg_num: "1"
  replicated:
    requireSafeReplicaSize: false
    size: 1
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: object-with-cephblockpools.rgw.buckets.data
  namespace: rook-ceph # namespace:cluster
spec:
  application: rgw
  failureDomain: osd
  parameters:
    pg_autoscale_mode: "off"
    pg_num: "1"
  replicated:
    requireSafeReplicaSize: false
    size: 1
3 changes: 3 additions & 0 deletions pkg/operator/ceph/object/objectstore.go
Original file line number Diff line number Diff line change
Expand Up @@ -758,6 +758,9 @@ func CreateObjectStorePools(context *Context, cluster *cephv1.ClusterSpec, metad
if len(missingPools) > 0 {
return fmt.Errorf("CR store pools are missing: %v", missingPools)
}

// pools exist, nothing to do
return nil
}

// get the default PG count for rgw metadata pools
Expand Down
Loading

0 comments on commit 0499534

Please sign in to comment.