From 045e73a4d8c0454dd2b78d63c028b4362f3a119b Mon Sep 17 00:00:00 2001
From: Stephan Feurer
Date: Wed, 6 Nov 2024 10:08:30 +0100
Subject: [PATCH] Update change storage node size documentation

---
 .../how-tos/exoscale/change_storage_node_size.adoc | 11 ++++++++++-
 .../ROOT/partials/storage-ceph-backfilling.adoc    |  9 +++++++--
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/docs/modules/ROOT/pages/how-tos/exoscale/change_storage_node_size.adoc b/docs/modules/ROOT/pages/how-tos/exoscale/change_storage_node_size.adoc
index 20c3ddc9..d5f42fbc 100644
--- a/docs/modules/ROOT/pages/how-tos/exoscale/change_storage_node_size.adoc
+++ b/docs/modules/ROOT/pages/how-tos/exoscale/change_storage_node_size.adoc
@@ -100,8 +100,17 @@ echo $NODES_TO_REPLACE
 [source,bash]
 ----
 terraform state rm "module.cluster.module.storage.random_id.node_id"
-terraform state rm "module.cluster.module.storage.exoscale_compute.nodes"
+terraform state rm "module.cluster.module.storage.exoscale_compute_instance.nodes"
 ----
++
+[NOTE]
+====
+If the cluster is using a dedicated hypervisor, you may also need to delete the anti-affinity group.
+[source,bash]
+----
+terraform state rm "module.cluster.module.storage.exoscale_anti_affinity_group.anti_affinity_group[0]"
+----
+====
 
 . Run Terraform to spin up replacement nodes
 +
diff --git a/docs/modules/ROOT/partials/storage-ceph-backfilling.adoc b/docs/modules/ROOT/partials/storage-ceph-backfilling.adoc
index f75cf10c..7a60b28e 100644
--- a/docs/modules/ROOT/partials/storage-ceph-backfilling.adoc
+++ b/docs/modules/ROOT/partials/storage-ceph-backfilling.adoc
@@ -9,9 +9,12 @@ If the storage cluster is mostly idle, you can speed up backfilling by temporari
 [source,bash]
 ----
 kubectl --as=cluster-admin -n syn-rook-ceph-cluster exec -it deploy/rook-ceph-tools -- \
- ceph config set osd osd_max_backfills 10 <1>
+ ceph config set osd osd_mclock_override_recovery_settings true <1>
+kubectl --as=cluster-admin -n syn-rook-ceph-cluster exec -it deploy/rook-ceph-tools -- \
+ ceph config set osd osd_max_backfills 10 <2>
 ----
-<1> The number of PGs which are allowed to backfill in parallel.
+<1> Allow overriding `osd_max_backfills`.
+<2> The number of PGs which are allowed to backfill in parallel.
 Adjust up or down depending on client load on the storage cluster.
 
 After backfilling is completed, you can remove the configuration with
@@ -20,6 +23,8 @@
 ----
 kubectl --as=cluster-admin -n syn-rook-ceph-cluster exec -it deploy/rook-ceph-tools -- \
 ceph config rm osd osd_max_backfills
+kubectl --as=cluster-admin -n syn-rook-ceph-cluster exec -it deploy/rook-ceph-tools -- \
+ceph config rm osd osd_mclock_override_recovery_settings
 ----
 ====
 +
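
After applying the updated backfilling instructions, the new settings and the recovery progress can be checked from the toolbox pod. This is a minimal sketch, assuming the same `syn-rook-ceph-cluster` namespace and `rook-ceph-tools` deployment used in the docs above; `ceph config get` and `ceph status` are standard Ceph commands and not part of the patch itself.

[source,bash]
----
# Confirm the override and the temporary backfill limit are in place.
kubectl --as=cluster-admin -n syn-rook-ceph-cluster exec -it deploy/rook-ceph-tools -- \
  ceph config get osd osd_mclock_override_recovery_settings
kubectl --as=cluster-admin -n syn-rook-ceph-cluster exec -it deploy/rook-ceph-tools -- \
  ceph config get osd osd_max_backfills

# Watch backfilling progress; the recovery line in the status output shows
# the remaining misplaced/degraded objects and the current recovery rate.
kubectl --as=cluster-admin -n syn-rook-ceph-cluster exec -it deploy/rook-ceph-tools -- \
  ceph status
----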