From 1a96168ad0464fcbc20208560c9f289df07fd4e4 Mon Sep 17 00:00:00 2001 From: Jiri Podivin Date: Mon, 6 May 2024 11:26:28 +0200 Subject: [PATCH] Adding basic pre-commit to the repo Checks include basic style and ansible-lint. Ansible-lint entry point includes an override for the ANSIBLE_ROLES_PATH var, to ensure repo-only roles load at runtime. Bash checks on files matching ".*.bash" are disabled as they are often not scripts but rather templates. Linting was applied to all other files. The GitHub Action was simplified so that from now on pre-commit takes care of everything. Signed-off-by: Jiri Podivin --- .github/workflows/ansible-lint.yaml | 19 ---------- .github/workflows/lint.yaml | 15 ++++++++ .pre-commit-config.yaml | 36 +++++++++++++++++++ Makefile | 2 +- .../assemblies/development_environment.adoc | 9 ++++- docs_user/adoption-attributes.adoc | 4 +-- .../assembly_adopting-the-data-plane.adoc | 2 +- .../assembly_adopting-the-image-service.adoc | 6 ++-- ...ssembly_configuring-isolated-networks.adoc | 4 +-- ...figuring-network-for-RHOSO-deployment.adoc | 2 +- ...embly_migrating-ceph-monitoring-stack.adoc | 4 +-- .../assembly_migrating-ceph-rbd.adoc | 3 +- .../assembly_migrating-ceph-rgw.adoc | 3 +- ...ting-monitoring-stack-to-target-nodes.adoc | 2 +- ..._migrating-the-object-storage-service.adoc | 2 +- ...mbly_planning-your-ipam-configuration.adoc | 1 - ...openstack-control-plane-configuration.adoc | 1 - .../modules/con_about-machine-configs.adoc | 2 +- .../modules/con_about-node-selector.adoc | 1 - ...l-provisioning-service-configurations.adoc | 6 ++-- ...service-config-generation-helper-tool.adoc | 3 +- ...con_block-storage-service-limitations.adoc | 1 - ...on_block-storage-service-requirements.adoc | 1 - .../modules/con_ceph-daemon-cardinality.adoc | 14 ++++---- .../con_changes-to-cephFS-via-NFS.adoc | 2 +- ...nfiguration-files-between-deployments.adoc | 2 +- .../con_identity-service-authentication.adoc | 1 - ...er-service-support-for-crypto-plugins.adoc | 2 +- docs_user/modules/con_node-roles.adoc | 2 +- ...reparation-for-block-storage-adoption.adoc | 4 +-- .../con_storage-driver-certification.adoc | 1 - .../modules/proc_adopting-autoscaling.adoc | 4 +-- ...ge-service-with-block-storage-backend.adoc | 2 +- ...pting-image-service-with-ceph-backend.adoc | 6 ++-- ...mage-service-with-nfs-ganesha-backend.adoc | 4 +-- .../proc_adopting-telemetry-services.adoc | 2 +- .../proc_adopting-the-compute-service.adoc | 4 +-- .../proc_adopting-the-networking-service.adoc | 4 +-- ...c_adopting-the-object-storage-service.adoc | 2 +- ...oc_adopting-the-orchestration-service.adoc | 2 +- ...s-for-migrating-ceph-monitoring-stack.adoc | 8 ++--- ...-prerequisites-for-migrating-ceph-rgw.adoc | 2 +- .../proc_configuring-a-ceph-backend.adoc | 6 ++-- .../proc_configuring-data-plane-nodes.adoc | 2 +- ...networking-for-control-plane-services.adoc | 2 +- .../proc_creating-a-ceph-nfs-cluster.adoc | 10 +++--- ...ing-rhosp-standalone-ceph-NFS-service.adoc | 2 +- .../proc_deploying-a-ceph-ingress-daemon.adoc | 9 +++-- ...ng-file-systems-service-control-plane.adoc | 6 ++-- ...g-the-bare-metal-provisioning-service.adoc | 14 ++++---- ..._deploying-the-block-storage-services.adoc | 1 - .../modules/proc_migrating-ceph-mds.adoc | 6 ++-- ...rating-databases-to-mariadb-instances.adoc | 5 ++- ...ting-existing-daemons-to-target-nodes.adoc | 5 ++- ...ing-mon-and-mgr-from-controller-nodes.adoc | 9 ++--- ...ng-object-storage-data-to-rhoso-nodes.adoc | 4 +-- .../proc_migrating-the-rgw-backends.adoc | 2 +-
.../proc_migrating-tls-everywhere.adoc | 2 +- ...t-forward-upgrade-on-compute-services.adoc | 2 +- ...-service-by-customizing-configuration.adoc | 1 - ...ck-to-migrate-daemons-to-target-nodes.adoc | 5 ++- ...rmation-from-your-existing-deployment.adoc | 4 --- ...pology-specific-service-configuration.adoc | 2 +- .../proc_reusing-existing-subnet-ranges.adoc | 3 +- .../proc_stopping-openstack-services.adoc | 2 +- .../modules/proc_using-new-subnet-ranges.adoc | 1 - ..._verifying-the-image-service-adoption.adoc | 2 +- .../files/pre_launch.bash | 1 - 68 files changed, 155 insertions(+), 150 deletions(-) delete mode 100644 .github/workflows/ansible-lint.yaml create mode 100644 .github/workflows/lint.yaml create mode 100644 .pre-commit-config.yaml diff --git a/.github/workflows/ansible-lint.yaml b/.github/workflows/ansible-lint.yaml deleted file mode 100644 index fe050cb5d..000000000 --- a/.github/workflows/ansible-lint.yaml +++ /dev/null @@ -1,19 +0,0 @@ -name: Ansible Lint -on: - push: - branches: - - main - pull_request: - branches: - - main -jobs: - test: - name: Ansible Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Run ansible-lint - uses: ansible/ansible-lint@main - env: - ANSIBLE_ROLES_PATH: /home/runner/work/data-plane-adoption/data-plane-adoption/tests/roles/ diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 000000000..bba48ad9e --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,15 @@ +name: Linting +on: + push: + branches: + - main + pull_request: + branches: + - main +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v3 + - uses: pre-commit/action@v3.0.1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..9dcf22fe6 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,36 @@ +--- +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: mixed-line-ending + - id: fix-byte-order-marker + - id: check-executables-have-shebangs + exclude: ".*.bash" # TODO Enable when scripts are consistent + - id: check-merge-conflict + - id: check-symlinks + - id: debug-statements + - id: check-yaml + files: .*\.(yaml|yml)$ + args: [--allow-multiple-documents] + - repo: https://github.com/ansible/ansible-lint + rev: v6.22.1 + hooks: + - id: ansible-lint + entry: env ANSIBLE_ROLES_PATH=./tests/roles:$ANSIBLE_ROLES_PATH ansible-lint + - repo: https://github.com/openstack-dev/bashate.git + rev: 2.1.1 + hooks: + - id: bashate + entry: bashate --error . --ignore=E006,E040 + verbose: false + exclude: ".*.bash" # TODO Enable when scripts are consistent + # Run bashate check for all bash scripts + # Ignores the following rules: + # E006: Line longer than 79 columns (as many scripts use jinja + # templating, this is very difficult) + # E040: Syntax error determined using `bash -n` (as many scripts + # use jinja templating, this will often fail and the syntax + # error will be discovered in execution anyway) diff --git a/Makefile b/Makefile index 015275e70..c1110ebed 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ TEST_ARGS ?= help: ## Display this help. 
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -##@ TESTS +##@ TESTS test-minimal: TEST_OUTFILE := tests/logs/test_minimal_out_$(shell date +%FT%T%Z).log test-minimal: ## Launch minimal test suite diff --git a/docs_dev/assemblies/development_environment.adoc b/docs_dev/assemblies/development_environment.adoc index 4d8319332..613cb272a 100644 --- a/docs_dev/assemblies/development_environment.adoc +++ b/docs_dev/assemblies/development_environment.adoc @@ -7,6 +7,13 @@ Wallaby (or OSP 17.1) OpenStack in Standalone configuration. == Environment prep +Install https://pre-commit.com/[pre-commit hooks] before contributing: +[,bash] +---- +pip install pre-commit +pre-commit install +---- + Get dataplane adoption repo: [,bash] ---- @@ -208,7 +215,7 @@ https://openstack-k8s-operators.github.io/data-plane-adoption/dev/#_reset_the_en === Creating a workload to adopt -To run `openstack` commands from the host without +To run `openstack` commands from the host without installing the package and copying the configuration file from the virtual machine, create an alias: [,bash] diff --git a/docs_user/adoption-attributes.adoc b/docs_user/adoption-attributes.adoc index 6bf97a9a1..0cef075d7 100644 --- a/docs_user/adoption-attributes.adoc +++ b/docs_user/adoption-attributes.adoc @@ -81,8 +81,8 @@ ifeval::["{build}" == "downstream"] :OpenShift: Red Hat OpenShift Container Platform :OpenShiftShort: RHOCP :OpenStackPreviousInstaller: director -:Ceph: Red Hat Ceph Storage -:CephCluster: Red Hat Ceph Storage +:Ceph: Red Hat Ceph Storage +:CephCluster: Red Hat Ceph Storage :CephRelease: 7 //Components and services diff --git a/docs_user/assemblies/assembly_adopting-the-data-plane.adoc b/docs_user/assemblies/assembly_adopting-the-data-plane.adoc index 80c68742d..44c4d8ad1 100644 --- a/docs_user/assemblies/assembly_adopting-the-data-plane.adoc +++ b/docs_user/assemblies/assembly_adopting-the-data-plane.adoc @@ -17,4 +17,4 @@ include::../modules/proc_stopping-infrastructure-management-and-compute-services include::../modules/proc_adopting-compute-services-to-the-data-plane.adoc[leveloffset=+1] -include::../modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc[leveloffset=+1] \ No newline at end of file +include::../modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc[leveloffset=+1] diff --git a/docs_user/assemblies/assembly_adopting-the-image-service.adoc b/docs_user/assemblies/assembly_adopting-the-image-service.adoc index 9e564f66d..c8d6ec897 100644 --- a/docs_user/assemblies/assembly_adopting-the-image-service.adoc +++ b/docs_user/assemblies/assembly_adopting-the-image-service.adoc @@ -11,14 +11,14 @@ configuration parameters provided by the source environment. When the procedure is over, the expectation is to see the `GlanceAPI` service up and running: the {identity_service} endpoints are updated and the same backend of the source Cloud is available. If the conditions above are met, the adoption is considered concluded. -This guide also assumes that: +This guide also assumes that: * A {OpenStackPreviousInstaller} environment (the source Cloud) is running on one side. * A `SNO` / `CodeReadyContainers` is running on the other side. * (optional) An internal/external `Ceph` cluster is reachable by both `crc` and {OpenStackPreviousInstaller}. ifeval::["{build}" != "downstream"] -//This link goes to a 404. 
Do we need this text downstream? +//This link goes to a 404. Do we need this text downstream? As already done for https://github.com/openstack-k8s-operators/data-plane-adoption/blob/main/keystone_adoption.md[Keystone], the Glance Adoption follows the same pattern. endif::[] @@ -30,4 +30,4 @@ include::../modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc[le include::../modules/proc_adopting-image-service-with-ceph-backend.adoc[leveloffset=+1] -include::../modules/proc_verifying-the-image-service-adoption.adoc[leveloffset=+1] \ No newline at end of file +include::../modules/proc_verifying-the-image-service-adoption.adoc[leveloffset=+1] diff --git a/docs_user/assemblies/assembly_configuring-isolated-networks.adoc b/docs_user/assemblies/assembly_configuring-isolated-networks.adoc index 91f756a63..6a2683797 100644 --- a/docs_user/assemblies/assembly_configuring-isolated-networks.adoc +++ b/docs_user/assemblies/assembly_configuring-isolated-networks.adoc @@ -23,7 +23,7 @@ allocations to be used for the new control plane services: [IMPORTANT] Make sure you have the information listed above before proceeding with the next steps. -[NOTE] +[NOTE] The exact list and configuration of isolated networks in the examples listed below should reflect the actual adopted environment. The number of isolated networks may differ from the example below. IPAM scheme may differ. @@ -35,4 +35,4 @@ include::../modules/proc_configuring-openshift-worker-nodes.adoc[leveloffset=+1] include::../modules/proc_configuring-networking-for-control-plane-services.adoc[leveloffset=+1] -include::../modules/proc_configuring-data-plane-nodes.adoc[leveloffset=+1] \ No newline at end of file +include::../modules/proc_configuring-data-plane-nodes.adoc[leveloffset=+1] diff --git a/docs_user/assemblies/assembly_configuring-network-for-RHOSO-deployment.adoc b/docs_user/assemblies/assembly_configuring-network-for-RHOSO-deployment.adoc index 17d9f475c..1d09d21ea 100644 --- a/docs_user/assemblies/assembly_configuring-network-for-RHOSO-deployment.adoc +++ b/docs_user/assemblies/assembly_configuring-network-for-RHOSO-deployment.adoc @@ -44,4 +44,4 @@ include::../modules/proc_retrieving-network-information-from-your-existing-deplo include::../assemblies/assembly_planning-your-ipam-configuration.adoc[leveloffset=+1] -include::../assemblies/assembly_configuring-isolated-networks.adoc[leveloffset=+1] \ No newline at end of file +include::../assemblies/assembly_configuring-isolated-networks.adoc[leveloffset=+1] diff --git a/docs_user/assemblies/assembly_migrating-ceph-monitoring-stack.adoc b/docs_user/assemblies/assembly_migrating-ceph-monitoring-stack.adoc index 5960b4791..fad8e444e 100644 --- a/docs_user/assemblies/assembly_migrating-ceph-monitoring-stack.adoc +++ b/docs_user/assemblies/assembly_migrating-ceph-monitoring-stack.adoc @@ -2,7 +2,7 @@ :context: migrating-ceph-monitoring -= Migrating the monitoring stack component to new nodes within an existing {Ceph} cluster += Migrating the monitoring stack component to new nodes within an existing {Ceph} cluster In the context of data plane adoption, where the {rhos_prev_long} ({OpenStackShort}) services are redeployed in {OpenShift}, a {OpenStackPreviousInstaller}-deployed {CephCluster} cluster will undergo a migration in a process we are calling “externalizing” the {CephCluster} cluster. 
@@ -32,5 +32,3 @@ We assume that: include::../modules/proc_completing-prerequisites-for-migrating-ceph-monitoring-stack.adoc[leveloffset=+1] include::../assemblies/assembly_migrating-monitoring-stack-to-target-nodes.adoc[leveloffset=+1] - - diff --git a/docs_user/assemblies/assembly_migrating-ceph-rbd.adoc b/docs_user/assemblies/assembly_migrating-ceph-rbd.adoc index 1b5cffd46..9ddf940d7 100644 --- a/docs_user/assemblies/assembly_migrating-ceph-rbd.adoc +++ b/docs_user/assemblies/assembly_migrating-ceph-rbd.adoc @@ -9,9 +9,8 @@ For hyperconverged infrastructure (HCI) or dedicated Storage nodes that are runn To migrate Red Hat Ceph Storage Rados Block Device (RBD), your environment must meet the following requirements: * {Ceph} is running version 6 or later and is managed by cephadm/orchestrator. -* NFS (ganesha) is migrated from a {OpenStackPreviousInstaller}-based deployment to cephadm. For more information, see xref:creating-a-ceph-nfs-cluster_migrating-databases[Creating a NFS Ganesha cluster]. +* NFS (ganesha) is migrated from a {OpenStackPreviousInstaller}-based deployment to cephadm. For more information, see xref:creating-a-ceph-nfs-cluster_migrating-databases[Creating a NFS Ganesha cluster]. * Both the {Ceph} public and cluster networks are propagated, with {OpenStackPreviousInstaller}, to the target nodes. * Ceph Monitors need to keep their IPs to avoid cold migration. include::../modules/proc_migrating-mon-and-mgr-from-controller-nodes.adoc[leveloffset=+1] - diff --git a/docs_user/assemblies/assembly_migrating-ceph-rgw.adoc b/docs_user/assemblies/assembly_migrating-ceph-rgw.adoc index 0a176452d..3116e242a 100644 --- a/docs_user/assemblies/assembly_migrating-ceph-rgw.adoc +++ b/docs_user/assemblies/assembly_migrating-ceph-rgw.adoc @@ -4,7 +4,7 @@ = Migrating {Ceph} RGW to external RHEL nodes -For hyperconverged infrastructure (HCI) or dedicated Storage nodes that are running {Ceph} version 6 or later, you must migrate the RGW daemons that are included in the {rhos_prev_long} Controller nodes into the existing external Red Hat Enterprise Linux (RHEL) nodes. The existing external RHEL nodes typically include the Compute nodes for an HCI environment or {Ceph} nodes. +For hyperconverged infrastructure (HCI) or dedicated Storage nodes that are running {Ceph} version 6 or later, you must migrate the RGW daemons that are included in the {rhos_prev_long} Controller nodes into the existing external Red Hat Enterprise Linux (RHEL) nodes. The existing external RHEL nodes typically include the Compute nodes for an HCI environment or {Ceph} nodes. 
To migrate Ceph Object Gateway (RGW), your environment must meet the following requirements: @@ -20,4 +20,3 @@ include::../modules/proc_migrating-the-rgw-backends.adoc[leveloffset=+1] include::../modules/proc_deploying-a-ceph-ingress-daemon.adoc[leveloffset=+1] include::../modules/proc_updating-the-object-storage-endpoints.adoc[leveloffset=+1] - diff --git a/docs_user/assemblies/assembly_migrating-monitoring-stack-to-target-nodes.adoc b/docs_user/assemblies/assembly_migrating-monitoring-stack-to-target-nodes.adoc index ba039e3f0..24f91b894 100644 --- a/docs_user/assemblies/assembly_migrating-monitoring-stack-to-target-nodes.adoc +++ b/docs_user/assemblies/assembly_migrating-monitoring-stack-to-target-nodes.adoc @@ -31,4 +31,4 @@ include::../modules/proc_migrating-existing-daemons-to-target-nodes.adoc[levelof ifeval::["{build}" != "downstream"] include::../modules/proc_relocating-one-instance-of-a-monitoring-stack-to-migrate-daemons-to-target-nodes.adoc[leveloffset=+1] -endif::[] \ No newline at end of file +endif::[] diff --git a/docs_user/assemblies/assembly_migrating-the-object-storage-service.adoc b/docs_user/assemblies/assembly_migrating-the-object-storage-service.adoc index 8ee71d20e..7f9f4acd8 100644 --- a/docs_user/assemblies/assembly_migrating-the-object-storage-service.adoc +++ b/docs_user/assemblies/assembly_migrating-the-object-storage-service.adoc @@ -12,4 +12,4 @@ Migration of the data happens replica by replica. Assuming you start with 3 repl include::../modules/proc_migrating-object-storage-data-to-rhoso-nodes.adoc[leveloffset=+1] -include::../modules/con_troubleshooting-object-storage-migration.adoc[leveloffset=+1] \ No newline at end of file +include::../modules/con_troubleshooting-object-storage-migration.adoc[leveloffset=+1] diff --git a/docs_user/assemblies/assembly_planning-your-ipam-configuration.adoc b/docs_user/assemblies/assembly_planning-your-ipam-configuration.adoc index 7b9353e5a..c8acb8346 100644 --- a/docs_user/assemblies/assembly_planning-your-ipam-configuration.adoc +++ b/docs_user/assemblies/assembly_planning-your-ipam-configuration.adoc @@ -67,4 +67,3 @@ Regardless of the IPAM scenario, the VLAN tags used in the existing deployment w include::../modules/proc_using-new-subnet-ranges.adoc[leveloffset=+1] include::../modules/proc_reusing-existing-subnet-ranges.adoc[leveloffset=+1] - diff --git a/docs_user/assemblies/assembly_reviewing-the-openstack-control-plane-configuration.adoc b/docs_user/assemblies/assembly_reviewing-the-openstack-control-plane-configuration.adoc index 8b7d84930..9a4f3b866 100644 --- a/docs_user/assemblies/assembly_reviewing-the-openstack-control-plane-configuration.adoc +++ b/docs_user/assemblies/assembly_reviewing-the-openstack-control-plane-configuration.adoc @@ -10,4 +10,3 @@ Make sure you installed and configured the os-diff tool. For more information, s xref:comparing-configuration-files-between-deployments_storage-requirements[Comparing configuration files between deployments]. 
include::../modules/proc_pulling-configuration-from-a-tripleo-deployment.adoc[leveloffset=+1] - diff --git a/docs_user/modules/con_about-machine-configs.adoc b/docs_user/modules/con_about-machine-configs.adoc index 82a3a4ba5..37dcfee6d 100644 --- a/docs_user/modules/con_about-machine-configs.adoc +++ b/docs_user/modules/con_about-machine-configs.adoc @@ -1,6 +1,6 @@ [id="about-machine-configs_{context}"] -= About machine configs += About machine configs Some services require you to have services or kernel modules running on the hosts where they run, for example `iscsid` or `multipathd` daemons, or the `nvme-fabrics` kernel module. diff --git a/docs_user/modules/con_about-node-selector.adoc b/docs_user/modules/con_about-node-selector.adoc index 258865437..dcf7779ad 100644 --- a/docs_user/modules/con_about-node-selector.adoc +++ b/docs_user/modules/con_about-node-selector.adoc @@ -91,4 +91,3 @@ the `nodeSelector` in `cinderVolumes`, so you need to specify it on each of the backends. It is possible to leverage labels added by the Node Feature Discovery (NFD) Operator to place {OpenStackShort} services. For more information, see link:https://docs.openshift.com/container-platform/4.13/hardware_enablement/psap-node-feature-discovery-operator.html[Node Feature Discovery Operator] in _OpenShift Container Platform 4.15 Documentation_. - diff --git a/docs_user/modules/con_bare-metal-provisioning-service-configurations.adoc b/docs_user/modules/con_bare-metal-provisioning-service-configurations.adoc index e7022632c..6733956af 100644 --- a/docs_user/modules/con_bare-metal-provisioning-service-configurations.adoc +++ b/docs_user/modules/con_bare-metal-provisioning-service-configurations.adoc @@ -2,7 +2,7 @@ = Bare Metal Provisioning service configurations -The {bare_metal_first_ref} is configured by using configuration snippets. For more information about the configuration snippets, see xref:service-configurations_planning[Service configurations]. +The {bare_metal_first_ref} is configured by using configuration snippets. For more information about the configuration snippets, see xref:service-configurations_planning[Service configurations]. {OpenStackPreviousInstaller} generally took care to not override the defaults of the {bare_metal}, however as with any system of descreet configuration management attempting to provide a cross-version compatability layer, some configuration was certainly defaulted in particular ways. For example, PXE Loader file names were often overridden at intermediate layers, and you will thus want to pay particular attention to the settings you choose to apply in your adopted deployment. The operator attempts to apply reasonable working default configuration, but if you override them with prior configuration, your experience may not be ideal or your new {bare_metal} will fail to operate. Similarly, additional configuration may be necessary, for example if your `ironic.conf` has additional hardware types enabled and in use. @@ -39,5 +39,5 @@ Finally, a parameter which may be important based upon your configuration and ex As a warning, hardware types set via the `ironic.conf` `enabled_hardware_types` parameter and hardware type driver interfaces starting with `staging-` are not available to be migrated into an adopted configuration. -Furthermore, {OpenStackPreviousInstaller}-based deployments made architectural decisions based upon self-management of services. 
When adopting deployments, you don't necessarilly need multiple replicas of secondary services such as the Introspection service. Should the host the container is running upon fail, {OpenShift} will restart the container on another host. The short-term transitory loss -//kgilliga: This last sentence trails off. \ No newline at end of file +Furthermore, {OpenStackPreviousInstaller}-based deployments made architectural decisions based upon self-management of services. When adopting deployments, you don't necessarilly need multiple replicas of secondary services such as the Introspection service. Should the host the container is running upon fail, {OpenShift} will restart the container on another host. The short-term transitory loss +//kgilliga: This last sentence trails off. diff --git a/docs_user/modules/con_block-storage-service-config-generation-helper-tool.adoc b/docs_user/modules/con_block-storage-service-config-generation-helper-tool.adoc index 85a3eabc0..158a16c74 100644 --- a/docs_user/modules/con_block-storage-service-config-generation-helper-tool.adoc +++ b/docs_user/modules/con_block-storage-service-config-generation-helper-tool.adoc @@ -9,7 +9,7 @@ helper tool that can create a draft of the files from a `cinder.conf` file. This tool is not meant to be a automation tool. It is mostly to help you get the gist of it, maybe point out some potential pitfalls and reminders. -[IMPORTANT] +[IMPORTANT] The tools requires `PyYAML` Python package to be installed (`pip install PyYAML`). @@ -91,4 +91,3 @@ configuration because it has sensitive information (credentials). The customServiceConfigSecrets: - openstackcinder-volumes-hpe_fc ---- - diff --git a/docs_user/modules/con_block-storage-service-limitations.adoc b/docs_user/modules/con_block-storage-service-limitations.adoc index 7daca49d9..800cb5ed1 100644 --- a/docs_user/modules/con_block-storage-service-limitations.adoc +++ b/docs_user/modules/con_block-storage-service-limitations.adoc @@ -14,4 +14,3 @@ nodes, is not currently being documented in this process. * Support for {block_storage} backends that require kernel modules not included in RHEL has not been tested in Operator deployed {rhos_prev_long}. * Adoption of DCN/Edge deployment is not currently described in this guide. - diff --git a/docs_user/modules/con_block-storage-service-requirements.adoc b/docs_user/modules/con_block-storage-service-requirements.adoc index 9083a6d0d..2903bd5b1 100644 --- a/docs_user/modules/con_block-storage-service-requirements.adoc +++ b/docs_user/modules/con_block-storage-service-requirements.adoc @@ -22,4 +22,3 @@ Once you know all the transport protocols that you are using, you can make sure that you are taking them into consideration when placing the Block Storage services (as mentioned above in the Node Roles section) and the right storage transport related binaries are running on the {OpenShift} nodes. Detailed information about the specifics for each storage transport protocol can be found in the xref:openshift-preparation-for-block-storage-adoption_adopting-block-storage[{OpenShift} preparation for {block_storage} adoption]. - diff --git a/docs_user/modules/con_ceph-daemon-cardinality.adoc b/docs_user/modules/con_ceph-daemon-cardinality.adoc index a3c4e8457..8ed18b3ff 100644 --- a/docs_user/modules/con_ceph-daemon-cardinality.adoc +++ b/docs_user/modules/con_ceph-daemon-cardinality.adoc @@ -2,18 +2,18 @@ = {Ceph} daemon cardinality -{Ceph} 6 and later applies strict constraints in the way daemons can be colocated within the same node. 
+{Ceph} 6 and later applies strict constraints in the way daemons can be colocated within the same node. ifeval::["{build}" != "upstream"] -For more information, see link:https://access.redhat.com/articles/1548993[Red Hat Ceph Storage: Supported configurations]. +For more information, see link:https://access.redhat.com/articles/1548993[Red Hat Ceph Storage: Supported configurations]. endif::[] -The resulting topology depends on the available hardware, as well as the amount of {Ceph} services present in the Controller nodes which are going to be retired. +The resulting topology depends on the available hardware, as well as the amount of {Ceph} services present in the Controller nodes which are going to be retired. ifeval::["{build}" != "upstream"] -For more information about the procedure that is required to migrate the RGW component and keep an HA model using the Ceph ingress daemon, see link:{defaultCephURL}/object_gateway_guide/index#high-availability-for-the-ceph-object-gateway[High availability for the Ceph Object Gateway] in _Object Gateway Guide_. +For more information about the procedure that is required to migrate the RGW component and keep an HA model using the Ceph ingress daemon, see link:{defaultCephURL}/object_gateway_guide/index#high-availability-for-the-ceph-object-gateway[High availability for the Ceph Object Gateway] in _Object Gateway Guide_. endif::[] -ifeval::["{build}" != "downstream"] +ifeval::["{build}" != "downstream"] The following document describes the procedure required to migrate the RGW component (and keep an HA model using the https://docs.ceph.com/en/latest/cephadm/services/rgw/#high-availability-service-for-rgw[Ceph Ingress daemon] in a common {OpenStackPreviousInstaller} scenario where Controller nodes represent the https://github.com/openstack/tripleo-ansible/blob/master/tripleo_ansible/roles/tripleo_cephadm/tasks/rgw.yaml#L26-L30[spec placement] where the service is deployed. -endif::[] +endif::[] As a general rule, the number of services that can be migrated depends on the number of available nodes in the cluster. The following diagrams cover the distribution of the {Ceph} daemons on the {Ceph} nodes where at least three nodes are required in a scenario that sees only RGW and RBD, without the {dashboard_first_ref}: ---- @@ -45,4 +45,4 @@ With the {dashboard} and the {rhos_component_storage_file}, 5 nodes minimum are | osd | mon/mgr/crash | mds/ganesha/ingress | | osd | rgw/ingress | mds/ganesha/ingress | | osd | mds/ganesha/ingress | dashboard/grafana | ----- \ No newline at end of file +---- diff --git a/docs_user/modules/con_changes-to-cephFS-via-NFS.adoc b/docs_user/modules/con_changes-to-cephFS-via-NFS.adoc index a3820b883..5f3197663 100644 --- a/docs_user/modules/con_changes-to-cephFS-via-NFS.adoc +++ b/docs_user/modules/con_changes-to-cephFS-via-NFS.adoc @@ -19,4 +19,4 @@ will correspond to the new clustered Ceph NFS service in contrast to other non-preferred export paths that continue to be displayed until the old isolated, standalone NFS service is decommissioned. -See xref:creating-a-ceph-nfs-cluster_migrating-databases[Creating a NFS Ganesha cluster] for instructions on setting up a clustered NFS service. \ No newline at end of file +See xref:creating-a-ceph-nfs-cluster_migrating-databases[Creating a NFS Ganesha cluster] for instructions on setting up a clustered NFS service. 
diff --git a/docs_user/modules/con_comparing-configuration-files-between-deployments.adoc b/docs_user/modules/con_comparing-configuration-files-between-deployments.adoc index afd97deb8..d27639c5e 100644 --- a/docs_user/modules/con_comparing-configuration-files-between-deployments.adoc +++ b/docs_user/modules/con_comparing-configuration-files-between-deployments.adoc @@ -133,4 +133,4 @@ And test your connection: ---- ssh -F ssh.config standalone ----- \ No newline at end of file +---- diff --git a/docs_user/modules/con_identity-service-authentication.adoc b/docs_user/modules/con_identity-service-authentication.adoc index 76c70ffc3..04c48c931 100644 --- a/docs_user/modules/con_identity-service-authentication.adoc +++ b/docs_user/modules/con_identity-service-authentication.adoc @@ -5,4 +5,3 @@ When you adopt a {OpenStackPreviousInstaller} {rhos_prev_long} ({OpenStackShort}) deployment, users authenticate to the Identity service (keystone) by using Secure RBAC (SRBAC). There is no change to how you perform operations if SRBAC is enabled. If SRBAC is not enabled, then adopting a {OpenStackPreviousInstaller} {OpenStackShort} deployment changes how you perform operations, such as adding roles to users. If you have custom policies enabled, contact support before adopting a {OpenStackPreviousInstaller} {OpenStackShort} deployment. // For more information on SRBAC see [link]. - diff --git a/docs_user/modules/con_key-manager-service-support-for-crypto-plugins.adoc b/docs_user/modules/con_key-manager-service-support-for-crypto-plugins.adoc index ea3c44780..2a8d834b2 100644 --- a/docs_user/modules/con_key-manager-service-support-for-crypto-plugins.adoc +++ b/docs_user/modules/con_key-manager-service-support-for-crypto-plugins.adoc @@ -7,4 +7,4 @@ The Key Manager service (barbican) does not yet support all of the crypto plug-i //**TODO: Right now Barbican only supports the simple crypto plugin. //*TODO: Talk about Ceph Storage and Swift Storage nodes, HCI deployments, -//etc.* \ No newline at end of file +//etc.* diff --git a/docs_user/modules/con_node-roles.adoc b/docs_user/modules/con_node-roles.adoc index fbabb5760..95b397b9d 100644 --- a/docs_user/modules/con_node-roles.adoc +++ b/docs_user/modules/con_node-roles.adoc @@ -48,4 +48,4 @@ The {OpenStackShort} Operators allow a great deal of flexibility on where to run {OpenStackShort} services, as you can use node labels to define which {OpenShiftShort} nodes are eligible to run the different {OpenStackShort} services. Refer to the xref:about-node-selector_{context}[About node selector] to learn more about using labels to define -placement of the {OpenStackShort} services. \ No newline at end of file +placement of the {OpenStackShort} services. diff --git a/docs_user/modules/con_openshift-preparation-for-block-storage-adoption.adoc b/docs_user/modules/con_openshift-preparation-for-block-storage-adoption.adoc index 2529bde2a..355a0de26 100644 --- a/docs_user/modules/con_openshift-preparation-for-block-storage-adoption.adoc +++ b/docs_user/modules/con_openshift-preparation-for-block-storage-adoption.adoc @@ -253,14 +253,14 @@ spec: + If you are using labels to restrict the nodes where Block Storage services are running you need to use a `MachineConfigPool` as described in -the xref:about-node-selector_planning[About node selector] to limit the effects of the +the xref:about-node-selector_planning[About node selector] to limit the effects of the `MachineConfig` to only the nodes where your services may run. 
+ If you are using a single node deployment to test the process, replace `worker` with `master` in the `MachineConfig`. + To configure the Block Storage services to use multipathing, enable the `use_multipath_for_image_xfer` configuration option in all the backend sections -and in the `[DEFAULT]` section for the backup service. This is the default in control plane deployments. Multipathing works as long as the service is running on the {OpenShiftShort} host. Do not override this option by setting `use_multipath_for_image_xfer = false`. +and in the `[DEFAULT]` section for the backup service. This is the default in control plane deployments. Multipathing works as long as the service is running on the {OpenShiftShort} host. Do not override this option by setting `use_multipath_for_image_xfer = false`. //*TODO:* Add, or at least mention, the Nova eDPM side for Multipathing once //it's implemented. diff --git a/docs_user/modules/con_storage-driver-certification.adoc b/docs_user/modules/con_storage-driver-certification.adoc index 8b73b986e..beb6c740c 100644 --- a/docs_user/modules/con_storage-driver-certification.adoc +++ b/docs_user/modules/con_storage-driver-certification.adoc @@ -5,4 +5,3 @@ Before you adopt your {rhos_prev_long} {rhos_prev_ver} deployment to a {rhos_long} {rhos_curr_ver} deployment, confirm that your deployed storage drivers are certified for use with {rhos_acro} {rhos_curr_ver}. //kgilliga: Note to self: I need to add a link to the certified software website for GA. The website currently does not include RHOSO 18.0 certified software. - diff --git a/docs_user/modules/proc_adopting-autoscaling.adoc b/docs_user/modules/proc_adopting-autoscaling.adoc index 5316055f1..7dea62cf2 100644 --- a/docs_user/modules/proc_adopting-autoscaling.adoc +++ b/docs_user/modules/proc_adopting-autoscaling.adoc @@ -12,7 +12,7 @@ This guide also assumes that: .Prerequisites * Previous Adoption steps completed. MariaDB, the {identity_service_first_ref}, the {orchestration_first_ref}, and Telemetry -should be already adopted. +should be already adopted. //kgilliga: Should this procedure be moved to after the data plane adoption chapter? .Procedure @@ -73,7 +73,7 @@ AODH_POD=`oc get pods -l service=aodh | tail -n 1 | cut -f 1 -d' '` oc exec -t $AODH_POD -c aodh-api -- cat /etc/aodh/aodh.conf ---- -. Check whether Aodh API service is registered in {identity_service}: +. Check whether Aodh API service is registered in {identity_service}: + ---- openstack endpoint list | grep aodh diff --git a/docs_user/modules/proc_adopting-image-service-with-block-storage-backend.adoc b/docs_user/modules/proc_adopting-image-service-with-block-storage-backend.adoc index 6b41d5c67..9c2193d78 100644 --- a/docs_user/modules/proc_adopting-image-service-with-block-storage-backend.adoc +++ b/docs_user/modules/proc_adopting-image-service-with-block-storage-backend.adoc @@ -33,7 +33,7 @@ should be already adopted. .Procedure . Write the patch manifest into a file, for example `glance_cinder.patch`. 
-For example: +For example: + ---- spec: diff --git a/docs_user/modules/proc_adopting-image-service-with-ceph-backend.adoc b/docs_user/modules/proc_adopting-image-service-with-ceph-backend.adoc index 8bce7aee8..ab6b5ceca 100644 --- a/docs_user/modules/proc_adopting-image-service-with-ceph-backend.adoc +++ b/docs_user/modules/proc_adopting-image-service-with-ceph-backend.adoc @@ -2,7 +2,7 @@ = Adopting the {image_service} that is deployed with a {Ceph} backend -Adopt the {image_service_first_ref} that you deployed with a {Ceph} backend. Use the `customServiceConfig` parameter to inject the right configuration to the `GlanceAPI` instance. +Adopt the {image_service_first_ref} that you deployed with a {Ceph} backend. Use the `customServiceConfig` parameter to inject the right configuration to the `GlanceAPI` instance. .Prerequisites @@ -52,7 +52,7 @@ EOF [NOTE] ==== If you have previously backed up your {OpenStackShort} services configuration file from the old environment, you can use os-diff to compare and make sure the configuration is correct. -For more information, see xref:pulling-configuration-from-tripleo-deployment_reviewing-configuration[Pulling the configuration from a {OpenStackPreviousInstaller} deployment]. +For more information, see xref:pulling-configuration-from-tripleo-deployment_reviewing-configuration[Pulling the configuration from a {OpenStackPreviousInstaller} deployment]. ---- os-diff diff /tmp/collect_tripleo_configs/glance/etc/glance/glance-api.conf glance_patch.yaml --crd @@ -67,4 +67,4 @@ This produces the difference between both ini configuration files. + ---- $ oc patch openstackcontrolplane openstack --type=merge --patch-file glance_patch.yaml ----- \ No newline at end of file +---- diff --git a/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc b/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc index d1a207cb7..bc4df4d9e 100644 --- a/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc +++ b/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc @@ -27,7 +27,7 @@ In particular, find among the {OpenStackPreviousInstaller} heat templates the fo + In the example above, as the first variable shows, the {image_service} has no notion of NFS Ganesha backend: the `File` driver is used in this scenario, and behind the scenes, the `filesystem_store_datadir` which usually points to `/var/lib/glance/images/` is mapped to the export value provided by the `GlanceNfsShare` variable. If the `GlanceNfsShare` is not exported through a network that is supposed to be propagated to the adopted {rhos_prev_long} control plane, an extra action is required by the human administrator, who must stop the `nfs-server` and remap the export to the `storage` network. This action usually happens when the {image_service} is stopped in the source Controller nodes. -ifeval::["{build}" != "downstream"] +ifeval::["{build}" != "downstream"] In the control plane, as per the (https://github.com/openstack-k8s-operators/docs/blob/main/images/network_diagram.jpg)[network isolation diagram], the {image_service} is attached to the Storage network, propagated via the associated `NetworkAttachmentsDefinition` custom resource, and the resulting Pods have already the right permissions to handle the {image_service} traffic through this network. endif::[] @@ -121,7 +121,7 @@ to reach the `nfs-server` and make sure the `nfs/path` points to the exported path in the `nfs-server`. . 
Patch `OpenStackControlPlane` to deploy {image_service} with a NFS Ganesha backend: -+ ++ ---- $ oc patch openstackcontrolplane openstack --type=merge --patch-file glance_nfs_patch.yaml ---- diff --git a/docs_user/modules/proc_adopting-telemetry-services.adoc b/docs_user/modules/proc_adopting-telemetry-services.adoc index cb2bd6a42..e5e1b874a 100644 --- a/docs_user/modules/proc_adopting-telemetry-services.adoc +++ b/docs_user/modules/proc_adopting-telemetry-services.adoc @@ -11,7 +11,7 @@ This guide also assumes that: .Prerequisites -* Previous Adoption steps completed. MariaDB, the {identity_service_first_ref} and the data plane should be already adopted. +* Previous Adoption steps completed. MariaDB, the {identity_service_first_ref} and the data plane should be already adopted. //kgilliga:Should this procedure be moved after the "Adopting the data plane" chapter? .Procedure diff --git a/docs_user/modules/proc_adopting-the-compute-service.adoc b/docs_user/modules/proc_adopting-the-compute-service.adoc index 0446e4bba..602e1c862 100644 --- a/docs_user/modules/proc_adopting-the-compute-service.adoc +++ b/docs_user/modules/proc_adopting-the-compute-service.adoc @@ -19,7 +19,7 @@ must already be imported into the control plane MariaDB; ** the xref:adopting-the-image-service_adopt-control-plane[Adopting the Image service] needs to be imported; ** the xref:migrating-ovn-data_migrating-databases[Migrating OVN data] need to be imported; ** the xref:adopting-the-networking-service_adopt-control-plane[Adopting the Networking service] needs to be imported; -** the {bare_metal} needs to be imported; +** the {bare_metal} needs to be imported; //kgilliga:I removed the link because it did not work. I might rewrite this section anyway. ** Required topology-specific service configuration. For more information, see xref:proc_retrieving-topology-specific-service-configuration_migrating-databases[Retrieving topology-specific service configuration]. @@ -132,7 +132,7 @@ oc wait --for condition=Ready --timeout=300s Nova/nova The local Conductor services will be started for each cell, while the superconductor runs in `cell0`. Note that `disable_compute_service_check_for_ffu` is mandatory for all imported Nova services, until the external data plane is imported, and until Nova Compute services fast-forward upgraded. For more information, see xref:adopting-compute-services-to-the-data-plane_data-plane[Adopting Compute services to the {rhos_acro} data plane] and xref:performing-a-fast-forward-upgrade-on-compute-services_data-plane[Performing a fast-forward upgrade on Compute services]. -.Verification +.Verification * Check that {compute_service} endpoints are defined and pointing to the control plane FQDNs and that Nova API responds. diff --git a/docs_user/modules/proc_adopting-the-networking-service.adoc b/docs_user/modules/proc_adopting-the-networking-service.adoc index 51ee80227..bf8f5cc55 100644 --- a/docs_user/modules/proc_adopting-the-networking-service.adoc +++ b/docs_user/modules/proc_adopting-the-networking-service.adoc @@ -20,7 +20,7 @@ should be already adopted. .Procedure //The following link takes me to a 404. Do we need this text? I think we should start the procedure at "Patch OpenStackControlPlane..." -ifeval::["{build}" != "downstream"] +ifeval::["{build}" != "downstream"] As already done for https://github.com/openstack-k8s-operators/data-plane-adoption/blob/main/keystone_adoption.md[Keystone], the Neutron Adoption follows the same pattern. 
endif::[] @@ -61,7 +61,7 @@ NEUTRON_API_POD=`oc get pods -l service=neutron | tail -n 1 | cut -f 1 -d' '` oc exec -t $NEUTRON_API_POD -c neutron-api -- cat /etc/neutron/neutron.conf ---- -* Check that the `Neutron API` service is registered in the {identity_service}: +* Check that the `Neutron API` service is registered in the {identity_service}: + ---- $ openstack service list | grep network diff --git a/docs_user/modules/proc_adopting-the-object-storage-service.adoc b/docs_user/modules/proc_adopting-the-object-storage-service.adoc index 6ab6e2b6f..77e4e285a 100644 --- a/docs_user/modules/proc_adopting-the-object-storage-service.adoc +++ b/docs_user/modules/proc_adopting-the-object-storage-service.adoc @@ -133,6 +133,6 @@ openstack object save test obj --file - Hello World! ---- -[NOTE] +[NOTE] At this point data is still stored on the previously existing nodes. For more information about migrating the actual data from the old to the new deployment, see xref:migrating-object-storage-data-to-rhoso-nodes_migrate-object-storage-service[Migrating the {object_storage_first_ref} data from {OpenStackShort} to {rhos_long} nodes]. diff --git a/docs_user/modules/proc_adopting-the-orchestration-service.adoc b/docs_user/modules/proc_adopting-the-orchestration-service.adoc index d6f77b2ee..796b435ec 100644 --- a/docs_user/modules/proc_adopting-the-orchestration-service.adoc +++ b/docs_user/modules/proc_adopting-the-orchestration-service.adoc @@ -25,7 +25,7 @@ such as {networking_first_ref}, {compute_service_first_ref}, {object_storage_fir trying to adopt {orchestration}. .Procedure -ifeval::["{build}" != "downstream"] +ifeval::["{build}" != "downstream"] As already done for https://github.com/openstack-k8s-operators/data-plane-adoption/blob/main/keystone_adoption.md[Keystone], the Heat Adoption follows a similar pattern. endif::[] diff --git a/docs_user/modules/proc_completing-prerequisites-for-migrating-ceph-monitoring-stack.adoc b/docs_user/modules/proc_completing-prerequisites-for-migrating-ceph-monitoring-stack.adoc index b2830d662..3b508a7bc 100644 --- a/docs_user/modules/proc_completing-prerequisites-for-migrating-ceph-monitoring-stack.adoc +++ b/docs_user/modules/proc_completing-prerequisites-for-migrating-ceph-monitoring-stack.adoc @@ -8,10 +8,10 @@ You must complete the following prerequisites before you migrate a {Ceph} cluste . Gather the current status of the monitoring stack. Verify that the hosts have no `monitoring` label (or `grafana`, `prometheus`, `alertmanager` -in case of a per daemons placement evaluation) associated: +in case of a per daemons placement evaluation) associated: [NOTE] The entire relocation process is driven by cephadm and relies on labels to be -assigned to the target nodes, where the daemons are scheduled. +assigned to the target nodes, where the daemons are scheduled. ifeval::["{build}" != "upstream"] Review the https://access.redhat.com/articles/1548993[cardinality matrix] before assigning labels and choose carefully the nodes where the monitoring stack components should be scheduled on. @@ -37,7 +37,7 @@ Confirm that the cluster is healthy and both `ceph orch ls` and plane has been migrated, it’s important to consider updating the container images referenced in the {CephCluster} cluster config. The current container images point to the undercloud registry, and it might be no longer available. As the undercloud won’t be available in the future, replace the undercloud provided images with an alternative registry. 
-ifeval::["{build}" != "downstream"] +ifeval::["{build}" != "downstream"] In case the desired option is to rely on the https://github.com/ceph/ceph/blob/reef/src/cephadm/cephadm.py#L48[default images] shipped by cephadm, remove the following config options from the {CephCluster} cluster. endif::[] @@ -80,5 +80,3 @@ container images, we update the config entry related to the container_image_base. This has an impact on all the {Ceph} daemons that rely on the undercloud images. New daemons will be deployed using the new/default {Ceph} image. - - diff --git a/docs_user/modules/proc_completing-prerequisites-for-migrating-ceph-rgw.adoc b/docs_user/modules/proc_completing-prerequisites-for-migrating-ceph-rgw.adoc index 3e1fe7e5f..26415381c 100644 --- a/docs_user/modules/proc_completing-prerequisites-for-migrating-ceph-rgw.adoc +++ b/docs_user/modules/proc_completing-prerequisites-for-migrating-ceph-rgw.adoc @@ -27,7 +27,7 @@ You must complete the following prerequisites before you begin the {Ceph} RGW mi ---- . Log in to `controller-0` and check the `pacemaker` status to help you -identify the information that you need before you start the RGW migration. +identify the information that you need before you start the RGW migration. + ---- Full List of Resources: diff --git a/docs_user/modules/proc_configuring-a-ceph-backend.adoc b/docs_user/modules/proc_configuring-a-ceph-backend.adoc index 1ff7090b9..a478372e5 100644 --- a/docs_user/modules/proc_configuring-a-ceph-backend.adoc +++ b/docs_user/modules/proc_configuring-a-ceph-backend.adoc @@ -17,7 +17,7 @@ became far simpler and hence, more became more secure with {rhos_acro} {rhos_cur * It is simpler to create a common ceph secret (keyring and ceph config file) and propagate the secret to all services that need it. -[TIP] +[TIP] To run `ceph` commands, you must use SSH to connect to a Ceph storage node and run `sudo cephadm shell`. This brings up a ceph orchestrator container that allows you to run administrative commands against the ceph @@ -33,7 +33,7 @@ ceph auth caps client.openstack \ .Prerequisites * The `OpenStackControlPlane` custom resource (CR) must already exist. -* Define the following shell variables. The values that are used are examples. Replace these example values with values that are correct for your environment: +* Define the following shell variables. The values that are used are examples. Replace these example values with values that are correct for your environment: [subs=+quotes] ---- @@ -115,5 +115,3 @@ spec: readOnly: true ' ---- - - diff --git a/docs_user/modules/proc_configuring-data-plane-nodes.adoc b/docs_user/modules/proc_configuring-data-plane-nodes.adoc index fff759a3f..925ad643c 100644 --- a/docs_user/modules/proc_configuring-data-plane-nodes.adoc +++ b/docs_user/modules/proc_configuring-data-plane-nodes.adoc @@ -87,4 +87,4 @@ spec: vlan: 20 ``` -The example above would exclude the `172.17.0.200` address from the pool. \ No newline at end of file +The example above would exclude the `172.17.0.200` address from the pool. diff --git a/docs_user/modules/proc_configuring-networking-for-control-plane-services.adoc b/docs_user/modules/proc_configuring-networking-for-control-plane-services.adoc index eab300a13..34f42f522 100644 --- a/docs_user/modules/proc_configuring-networking-for-control-plane-services.adoc +++ b/docs_user/modules/proc_configuring-networking-for-control-plane-services.adoc @@ -101,4 +101,4 @@ For example, The example above would exclude the `172.17.0.65` address from the allocation pool. 
-// TODO: is there anything specific to mention about BGP L3 mode here? \ No newline at end of file +// TODO: is there anything specific to mention about BGP L3 mode here? diff --git a/docs_user/modules/proc_creating-a-ceph-nfs-cluster.adoc b/docs_user/modules/proc_creating-a-ceph-nfs-cluster.adoc index c15d26f8d..101144598 100644 --- a/docs_user/modules/proc_creating-a-ceph-nfs-cluster.adoc +++ b/docs_user/modules/proc_creating-a-ceph-nfs-cluster.adoc @@ -10,12 +10,12 @@ If you use the Ceph via NFS backend with {rhos_component_storage_file_first_ref} it is easier for clients to mount their existing shares through the new NFS export locations. . You must propagate the `StorageNFS` network to the target nodes -where the `ceph-nfs` service will be deployed. +where the `ceph-nfs` service will be deployed. ifeval::["{build}" != "downstream"] See link:https://docs.openstack.org/project-deploy-guide/tripleo-docs/wallaby/features/network_isolation.html#deploying-the-overcloud-with-network-isolation[Deploying an Overcloud with Network Isolation with TripleO] and link:https://docs.openstack.org/project-deploy-guide/tripleo-docs/wallaby/post_deployment/updating_network_configuration_post_deployment.html[Applying network configuration changes after deployment] for the background to these -tasks. +tasks. endif::[] The following steps will be relevant if the Ceph Storage nodes were deployed via {OpenStackPreviousInstaller}. @@ -96,14 +96,14 @@ with a 3-node NFS cluster. * The `ingress-mode` argument must be set to ``haproxy-protocol``. No other ingress-mode will be supported. This ingress mode will allow enforcing client restrictions through {rhos_component_storage_file}. -ifeval::["{build}" != "downstream"] +ifeval::["{build}" != "downstream"] * For more information on deploying the clustered Ceph NFS service, see the link:https://docs.ceph.com/en/latest/cephadm/services/nfs/[ceph orchestrator documentation]. endif::[] -ifeval::["{build}" != "upstream"] +ifeval::["{build}" != "upstream"] For more information on deploying the clustered Ceph NFS service, see the -link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/7/html-single/operations_guide/index#management-of-nfs-ganesha-gateway-using-the-ceph-orchestrator[Management of NFS-Ganesha gateway using the Ceph Orchestrator (Limited Availability)] in _Red Hat Ceph Storage 7 Operations Guide_. +link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/7/html-single/operations_guide/index#management-of-nfs-ganesha-gateway-using-the-ceph-orchestrator[Management of NFS-Ganesha gateway using the Ceph Orchestrator (Limited Availability)] in _Red Hat Ceph Storage 7 Operations Guide_. endif::[] //kgilliga: Confirm that we should link to the Ceph Operations Guide downstream. * The following commands are run inside a `cephadm shell` to create a clustered diff --git a/docs_user/modules/proc_decommissioning-rhosp-standalone-ceph-NFS-service.adoc b/docs_user/modules/proc_decommissioning-rhosp-standalone-ceph-NFS-service.adoc index a057d93d9..79237e587 100644 --- a/docs_user/modules/proc_decommissioning-rhosp-standalone-ceph-NFS-service.adoc +++ b/docs_user/modules/proc_decommissioning-rhosp-standalone-ceph-NFS-service.adoc @@ -21,7 +21,7 @@ upgrades, it is recommended that the decommissioning period is short. . Once the old NFS service is no longer used, you can adjust the configuration for the `manila-share` service to remove the `cephfs_ganesha_server_ip` option. 
Doing this will restart the `manila-share` process and remove the export -locations that pertained to the old NFS service from all the shares. +locations that pertained to the old NFS service from all the shares. //kgilliga: Is this 2 separate steps? Remove the `cephfs_ganesha_server_ip` option, and then run oc patch openstackcontrolplane openstack? Or is the following code just an example of what the yaml file should look like? + [source,yaml] diff --git a/docs_user/modules/proc_deploying-a-ceph-ingress-daemon.adoc b/docs_user/modules/proc_deploying-a-ceph-ingress-daemon.adoc index 64de4f374..428edc3b9 100644 --- a/docs_user/modules/proc_deploying-a-ceph-ingress-daemon.adoc +++ b/docs_user/modules/proc_deploying-a-ceph-ingress-daemon.adoc @@ -3,11 +3,11 @@ = Deploying a {Ceph} ingress daemon To match the cardinality diagram, you use cephadm labels to refer to a group of nodes where a given daemon type should be deployed. For more information about the cardinality diagram, see xref:ceph-daemon-cardinality_{context}[{Ceph} daemon cardinality]. -`HAProxy` is managed by {OpenStackPreviousInstaller} through `Pacemaker`: the three running instances at this point will point to the old RGW backends, resulting in a broken configuration. +`HAProxy` is managed by {OpenStackPreviousInstaller} through `Pacemaker`: the three running instances at this point will point to the old RGW backends, resulting in a broken configuration. ifeval::["{build}" != "upstream"] Since you are going to deploy the Ceph ingress daemon, the first thing to do is remove the existing `ceph_rgw` config, clean up the config created by {OpenStackPreviousInstaller} and restart the service to make sure other services are not affected by this change. endif::[] -ifeval::["{build}" != "downstream"] +ifeval::["{build}" != "downstream"] Since you are going to deploy the https://github.com/openstack-archive/tripleo-ansible/blob/stable/wallaby/tripleo_ansible/ansible_plugins/modules/ceph_mkspec.py[Ceph ingress daemon], the first thing to do is remove the existing `ceph_rgw` config, clean up the config created by {OpenStackPreviousInstaller} and restart the service to make sure other services are not affected by this change. endif::[] After you complete this procedure, you can reach the RGW backend from the ingress daemon and use RGW through the {object_storage} command line interface (CLI). @@ -63,8 +63,8 @@ The {object_storage_first_ref} CLI fails at this point: HTTPConnectionPool(host='10.0.0.103', port=8080): Max retries exceeded with url: /swift/v1/AUTH_852f24425bb54fa896476af48cbe35d3?format=json (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 111] Connection refused')) ---- -. Set the required images for both HAProxy and Keepalived: -+ +. Set the required images for both HAProxy and Keepalived: ++ ---- ifeval::["{build}" != "downstream"] [ceph: root@controller-0 /]# ceph config set mgr mgr/cephadm/container_image_haproxy quay.io/ceph/haproxy:2.3 @@ -131,4 +131,3 @@ rgw.rgw ?:8090 3/3 37s ago 4m label:rgw anonymous[ceph: root@controller-0 /]# — ---- - diff --git a/docs_user/modules/proc_deploying-file-systems-service-control-plane.adoc b/docs_user/modules/proc_deploying-file-systems-service-control-plane.adoc index f093f4ffd..be3e65aa1 100644 --- a/docs_user/modules/proc_deploying-file-systems-service-control-plane.adoc +++ b/docs_user/modules/proc_deploying-file-systems-service-control-plane.adoc @@ -53,9 +53,9 @@ all of these can be ignored. {OpenShift} routes and ingress. 
* Pay attention to policy overrides. In {rhos_acro} {rhos_curr_ver}, the {rhos_component_storage_file} ships with a secure default RBAC, and overrides may not be necessary. -ifeval::["{build}" != "downstream"] +ifeval::["{build}" != "downstream"] Please review RBAC defaults by using the https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-policy-generator.html[Oslo policy generator] -tool. +tool. endif::[] If a custom policy is necessary, you must provide it as a `ConfigMap`. The following sample spec illustrates how a @@ -103,7 +103,7 @@ you will need to split them up when deploying {rhos_acro} {rhos_curr_ver}. Each backend driver needs to use its own instance of the `manila-share` service. * If a storage backend driver needs a custom container image, find it on the -https://catalog.redhat.com/software/containers/search?gs&q=manila[RHOSP Ecosystem Catalog] +https://catalog.redhat.com/software/containers/search?gs&q=manila[RHOSP Ecosystem Catalog] and set `manila: template: manilaShares: : containerImage` value. The following example illustrates multiple storage backend drivers, using custom container images. diff --git a/docs_user/modules/proc_deploying-the-bare-metal-provisioning-service.adoc b/docs_user/modules/proc_deploying-the-bare-metal-provisioning-service.adoc index 81ef7c9e6..01df2e942 100644 --- a/docs_user/modules/proc_deploying-the-bare-metal-provisioning-service.adoc +++ b/docs_user/modules/proc_deploying-the-bare-metal-provisioning-service.adoc @@ -22,8 +22,8 @@ must already be imported into the control plane MariaDB, {identity_service_first $CONTROLLER1_SSH cat /var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf > ironic.conf ---- + -[NOTE] -It is critical that this configuration file comes from one of the controllers and not a {OpenStackPreviousInstaller} undercloud node. The {OpenStackPreviousInstaller} undercloud node specifically operated with different configuration which would not be appropriate or applicable to apply when adopting the Overcloud Ironic deployment. +[NOTE] +It is critical that this configuration file comes from one of the controllers and not a {OpenStackPreviousInstaller} undercloud node. The {OpenStackPreviousInstaller} undercloud node specifically operated with different configuration which would not be appropriate or applicable to apply when adopting the Overcloud Ironic deployment. //kgilliga: What is meant by "overcloud Ironic deployment? Can this be changed to the "RHOSP Bare Metal Provisioning service deployment"? * If adopting the Ironic Inspector service you need the value of the `IronicInspectorSubnets` {OpenStackPreviousInstaller} parameter. Use the same values to populate the `dhcpRanges` parameter in the target environment. @@ -95,14 +95,14 @@ spec: ' ---- + -The operator begins to apply the configuration and start the necessary Bare Metal Provisioning services. Once the services have reached a running state, the {bare_metal} automatically begins polling the power state of bare metal nodes for which it is configured to manage. +The operator begins to apply the configuration and start the necessary Bare Metal Provisioning services. Once the services have reached a running state, the {bare_metal} automatically begins polling the power state of bare metal nodes for which it is configured to manage. -. Wait for Bare Metal Provisioning control plane services' custom resources to become ready: +. 
Wait for Bare Metal Provisioning control plane services' custom resources to become ready: + ---- oc wait --for condition=Ready --timeout=300s ironics.ironic.openstack.org ironic //kgilliga: Is "optionally verify the individual service" part of the code block, or is it a separate step? -# Optionally verify the individual services +# Optionally verify the individual services oc wait --for condition=Ready --timeout=300s ironicapis.ironic.openstack.org ironic-api oc wait --for condition=Ready --timeout=300s ironicconductors.ironic.openstack.org ironic-conductor oc wait --for condition=Ready --timeout=300s ironicinspectors.ironic.openstack.org ironic-inspector @@ -166,7 +166,7 @@ spec: ' ---- -.Verification +.Verification // TODO, this will need more work @@ -177,4 +177,4 @@ openstack endpoint list |grep ironic openstack baremetal node list ---- + -The time required for the {bare_metal} to review and reconcile the power state of bare metal nodes is dependent upon the number of operating conductors through the `replicas` parameter and which are present in the {bare_metal} deployment being adopted. +The time required for the {bare_metal} to review and reconcile the power state of bare metal nodes is dependent upon the number of operating conductors through the `replicas` parameter and which are present in the {bare_metal} deployment being adopted. diff --git a/docs_user/modules/proc_deploying-the-block-storage-services.adoc b/docs_user/modules/proc_deploying-the-block-storage-services.adoc index 448df616e..0c7cbdf08 100644 --- a/docs_user/modules/proc_deploying-the-block-storage-services.adoc +++ b/docs_user/modules/proc_deploying-the-block-storage-services.adoc @@ -199,4 +199,3 @@ openstack --os-volume-api-version 3.47 volume create --backup backup restored [NOTE] You do not boot a {compute_service_first_ref} instance using the new volume from image or try to detach the old volume because {compute_service} and the {block_storage} are still not connected. - diff --git a/docs_user/modules/proc_migrating-ceph-mds.adoc b/docs_user/modules/proc_migrating-ceph-mds.adoc index cd35abed0..189e2a5c1 100644 --- a/docs_user/modules/proc_migrating-ceph-mds.adoc +++ b/docs_user/modules/proc_migrating-ceph-mds.adoc @@ -5,7 +5,7 @@ In the context of data plane adoption, where the {rhos_prev_long} ({OpenStackShort}) services are redeployed in {OpenShift}, a {OpenStackPreviousInstaller}-deployed {CephCluster} cluster will undergo a migration in a process we are calling “externalizing” the {CephCluster} cluster. There are two deployment topologies, broadly, that include an “internal” {CephCluster} cluster today: one is where {OpenStackShort} includes dedicated {CephCluster} nodes to host object storage daemons (OSDs), and the other is Hyperconverged Infrastructure (HCI) where Compute nodes double up as {CephCluster} nodes. In either scenario, there are some {Ceph} processes that are deployed on {OpenStackShort} Controller nodes: {Ceph} monitors, Ceph Object Gateway (RGW), Rados Block Device (RBD), Ceph Metadata Server (MDS), Ceph Dashboard, and NFS Ganesha. -This document describes how to migrate the MDS daemon in case {rhos_component_storage_file_first_ref} (deployed with either a cephfs-native or ceph-nfs backend) is part of the overcloud deployment. The MDS migration is performed by cephadm, and as done for the other daemons, the general idea is to move the daemons placement from a "hosts" based approach to a "label" based one. 
+This document describes how to migrate the MDS daemon in case {rhos_component_storage_file_first_ref} (deployed with either a cephfs-native or ceph-nfs backend) is part of the overcloud deployment. The MDS migration is performed by cephadm, and as done for the other daemons, the general idea is to move the daemons placement from a "hosts" based approach to a "label" based one. ifeval::["{build}" != "upstream"] This ensures that the human operator can easily visualize the status of the cluster and where daemons are placed using the `ceph orch host` command, and have a general view of how the daemons are co-located within a given host, according to the https://access.redhat.com/articles/1548993[cardinality matrix]. endif::[] @@ -202,7 +202,7 @@ Standby daemons: [mds.mds.controller-1.tyiziq{-1:499136} state up:standby seq 1 addr [v2:172.17.3.43:6800/3615018301,v1:172.17.3.43:6801/3615018301] compat {c=[1],r=[1],i=[7ff]}] ---- -. To migrate MDS to the right nodes, set the MDS affinity that manages the MDS failover: +. To migrate MDS to the right nodes, set the MDS affinity that manages the MDS failover: ifeval::["{build}" != "downstream"] [NOTE] It is possible to elect as "active" a dedicated MDS for a particular file system. To configure this preference, `CephFS` provides a configuration option for MDS called `mds_join_fs` which enforces this affinity. @@ -264,4 +264,4 @@ ifeval::["{build}" != "downstream"] * https://docs.ceph.com/en/reef/cephfs/eviction[cephfs - eviction] * https://docs.ceph.com/en/reef/cephfs/standby/#configuring-mds-file-system-affinity[ceph mds - affinity] -endif::[] \ No newline at end of file +endif::[] diff --git a/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc b/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc index e4ce44c8e..f4653a30b 100644 --- a/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc +++ b/docs_user/modules/proc_migrating-databases-to-mariadb-instances.adoc @@ -7,7 +7,7 @@ This document describes how to move the databases from the original {rhos_prev_long} ({OpenStackShort}) deployment to the MariaDB instances in the {OpenShift} cluster. -[NOTE] +[NOTE] This example scenario describes a simple single-cell setup. Real multi-stack topology recommended for production use results in different cells DBs layout, and should be using different naming schemes (not covered @@ -52,7 +52,7 @@ SOURCE_GALERA_MEMBERS=( SOURCE_DB_ROOT_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }') ---- -* Prepare MariaDB copy directory and the adoption helper pod +* Prepare MariaDB copy directory and the adoption helper pod * Create a temporary folder to store adoption helper pod (pick storage requests to fit MySQL database size): + @@ -278,4 +278,3 @@ oc delete pod mariadb-copy-data oc delete pvc mariadb-data ---- For more information, see https://learn.redhat.com/t5/DO280-Red-Hat-OpenShift/About-pod-security-standards-and-warnings/m-p/32502[About pod security standards and warnings]. 
- diff --git a/docs_user/modules/proc_migrating-existing-daemons-to-target-nodes.adoc b/docs_user/modules/proc_migrating-existing-daemons-to-target-nodes.adoc index f1f3acd93..e60b31d35 100644 --- a/docs_user/modules/proc_migrating-existing-daemons-to-target-nodes.adoc +++ b/docs_user/modules/proc_migrating-existing-daemons-to-target-nodes.adoc @@ -98,7 +98,7 @@ prometheus.cephstorage-1 cephstorage-1.redhat.local 172.17.3.53:9092 ---- + [NOTE] -After you migrate the monitoring stack, you lose High Availability: the monitoring stack daemons have no VIP and HAproxy anymore; Node exporters are still running on all the nodes: instead of using labels we keep the current approach as we want to not reduce the monitoring space covered. +After you migrate the monitoring stack, you lose High Availability: the monitoring stack daemons have no VIP and HAproxy anymore; Node exporters are still running on all the nodes: instead of using labels we keep the current approach as we want to not reduce the monitoring space covered. . You must review the {Ceph}configuration to ensure that it is aligned with the relocation you just made. In particular, focus on the following configuration entries: //kgilliga: Note to self: Revisit the wording of this step. @@ -146,7 +146,7 @@ relocation. The service is provided by the Ceph Manager daemon, hence we might experience an impact when the active mgr is migrated or is force-failed. However, having three replicas definition allows to redirect requests to a different instance (it’s still an A/P model), hence the impact should be -limited. +limited. .. When the RBD migration is over, the following {Ceph} config keys must be regenerated to point to the right mgr container: + @@ -166,4 +166,3 @@ $ ceph orch ps | awk '/mgr./ {print $1}' ---- $ ceph config set mgr mgr/dashboard/<>/server_addr/ ---- - diff --git a/docs_user/modules/proc_migrating-mon-and-mgr-from-controller-nodes.adoc b/docs_user/modules/proc_migrating-mon-and-mgr-from-controller-nodes.adoc index b2827a58a..5357588fb 100644 --- a/docs_user/modules/proc_migrating-mon-and-mgr-from-controller-nodes.adoc +++ b/docs_user/modules/proc_migrating-mon-and-mgr-from-controller-nodes.adoc @@ -10,7 +10,7 @@ Migrate your Ceph Monitor daemons, Ceph Manager daemons, and object storage daem _admin nodes that administrators can use to manage the {CephCluster} cluster and perform day 2 operations against it. * Keep the {CephCluster} cluster operational during the migration. -The following procedure shows an example migration from a Controller node (`oc0-controller-1`) and a {Ceph} node (`oc0-ceph-0`). Use the names of the nodes in your environment. +The following procedure shows an example migration from a Controller node (`oc0-controller-1`) and a {Ceph} node (`oc0-ceph-0`). Use the names of the nodes in your environment. .Prerequisites @@ -112,7 +112,7 @@ oc0-controller-1 192.168.24.23 _admin mgr mon oc0-controller-2 192.168.24.13 _admin mgr mon ---- -. Log in to the `controller-0` node, then +. Log in to the `controller-0` node, then //kgilliga: Need more description of what is happening in this step. + ---- @@ -218,7 +218,7 @@ mon_host = [v2:172.16.11.54:3300/0,v1:172.16.11.54:6789/0] [v2:172.16.11.121:330 12: vlan14 inet 172.16.14.223/24 brd 172.16.14.255 scope global vlan14\ valid_lft forever preferred_lft forever ---- -. On the oc0-ceph-0, add the IP address of the mon that has been deleted from `controller-0`, and verify that the IP address has been assigned and can be reached: +. 
On the oc0-ceph-0, add the IP address of the mon that has been deleted from `controller-0`, and verify that the IP address has been assigned and can be reached: //kgilliga: Revisit this step. Do we need the [heat-admin @oc0-ceph-0 ~]$ ip -o -4 a] code block? Is that code block an example of the output? + ---- @@ -371,6 +371,3 @@ At this point the {CephCluster} cluster is clean: The `oc0-controller-1` is removed and powered off without leaving traces on the {CephCluster} cluster. . Repeat this procedure for additional Controller nodes in your environment until you have migrated all the Ceph Mon and Ceph Manager daemons to the target nodes. - - - diff --git a/docs_user/modules/proc_migrating-object-storage-data-to-rhoso-nodes.adoc b/docs_user/modules/proc_migrating-object-storage-data-to-rhoso-nodes.adoc index 461c2a0c4..1f6b4d289 100644 --- a/docs_user/modules/proc_migrating-object-storage-data-to-rhoso-nodes.adoc +++ b/docs_user/modules/proc_migrating-object-storage-data-to-rhoso-nodes.adoc @@ -23,7 +23,7 @@ external IP of the DNSMasq service as name server in `/etc/resolv.conf`: ---- oc get service dnsmasq-dns -o jsonpath="{.status.loadBalancer.ingress[0].ip}" | CONTROLLER1_SSH tee /etc/resolv.conf ---- -* To track the current status of the replication a tool called `swift-dispersion` is used. It consists of two parts, a population tool to be run before changing the {object_storage} rings and a report tool to run afterwards to gather the current status. Run the `swift-dispersion-populate` command: +* To track the current status of the replication a tool called `swift-dispersion` is used. It consists of two parts, a population tool to be run before changing the {object_storage} rings and a report tool to run afterwards to gather the current status. Run the `swift-dispersion-populate` command: //kgilliga: Is this a prerequisite? + ---- @@ -162,5 +162,3 @@ oc debug --keep-labels=true job/swift-ring-rebalance -- /bin/sh -c 'swift-ring-t ---- CONTROLLER1_SSH "find /srv/node/ -type f -name '*.db' -o -name '*.data' | wc -l" ---- - - diff --git a/docs_user/modules/proc_migrating-the-rgw-backends.adoc b/docs_user/modules/proc_migrating-the-rgw-backends.adoc index b0e6731e1..5f2e1fe6e 100644 --- a/docs_user/modules/proc_migrating-the-rgw-backends.adoc +++ b/docs_user/modules/proc_migrating-the-rgw-backends.adoc @@ -72,7 +72,7 @@ spec: ---- . In the `placement` section, replace the following values: -* Replace the controller nodes with the `label: rgw` label. +* Replace the controller nodes with the `label: rgw` label. * Change the ` rgw_frontend_port` value to `8090` to avoid conflicts with the Ceph ingress daemon. + ---- diff --git a/docs_user/modules/proc_migrating-tls-everywhere.adoc b/docs_user/modules/proc_migrating-tls-everywhere.adoc index 8e2fd4bf6..1b433e957 100644 --- a/docs_user/modules/proc_migrating-tls-everywhere.adoc +++ b/docs_user/modules/proc_migrating-tls-everywhere.adoc @@ -41,7 +41,7 @@ IPA_SSH="ssh -i root@" ---- endif::[] -* Define the map of Compute node name, IP pairs. +* Define the map of Compute node name, IP pairs. //kgilliga: Is this a separate prerequisite or is this part of "defining shell variables"? 
+ [subs=+quotes] diff --git a/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc b/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc index 61c847234..d82784368 100644 --- a/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc +++ b/docs_user/modules/proc_performing-a-fast-forward-upgrade-on-compute-services.adoc @@ -9,7 +9,7 @@ is done independently of each other, by configuring `[upgrade_levels]compute=auto` for Compute services. Compute control plane services apply the change right after custom resource (CR) is patched. Compute data plane services will catch up the same config change with ansible deployment later on. -//[NOTE] +//[NOTE] //Additional orchestration happening around the FFU workarounds //configuration for Compute data plane service is a subject of future changes. kgilliga: We don't comment on future changes downstream. diff --git a/docs_user/modules/proc_preparing-block-storage-service-by-customizing-configuration.adoc b/docs_user/modules/proc_preparing-block-storage-service-by-customizing-configuration.adoc index e5034cd85..bcce7307e 100644 --- a/docs_user/modules/proc_preparing-block-storage-service-by-customizing-configuration.adoc +++ b/docs_user/modules/proc_preparing-block-storage-service-by-customizing-configuration.adoc @@ -123,4 +123,3 @@ spec: - key: policy path: policy.yaml ---- - diff --git a/docs_user/modules/proc_relocating-one-instance-of-a-monitoring-stack-to-migrate-daemons-to-target-nodes.adoc b/docs_user/modules/proc_relocating-one-instance-of-a-monitoring-stack-to-migrate-daemons-to-target-nodes.adoc index 861970ae3..361c4b0ef 100644 --- a/docs_user/modules/proc_relocating-one-instance-of-a-monitoring-stack-to-migrate-daemons-to-target-nodes.adoc +++ b/docs_user/modules/proc_relocating-one-instance-of-a-monitoring-stack-to-migrate-daemons-to-target-nodes.adoc @@ -155,9 +155,9 @@ relocation. The service is provided by the Ceph Manager daemon, hence we might experience an impact when the active mgr is migrated or is force-failed. However, having three replicas definition allows to redirect requests to a different instance (it’s still an A/P model), hence the impact should be -limited. +limited. .. When the RBD migration is over, the following {Ceph} config keys must -be regenerated to point to the right mgr container: +be regenerated to point to the right mgr container: //kgilliga: Is the assumption that customers have already done the RBD migration before doing this procedure? RBD migration comes before this chapter in the adoption guide. + ---- @@ -176,4 +176,3 @@ $ ceph orch ps | awk '/mgr./ {print $1}' ---- $ ceph config set mgr mgr/dashboard/<>/server_addr/ ---- - diff --git a/docs_user/modules/proc_retrieving-network-information-from-your-existing-deployment.adoc b/docs_user/modules/proc_retrieving-network-information-from-your-existing-deployment.adoc index 5ebe732be..49b5e8c9c 100644 --- a/docs_user/modules/proc_retrieving-network-information-from-your-existing-deployment.adoc +++ b/docs_user/modules/proc_retrieving-network-information-from-your-existing-deployment.adoc @@ -65,7 +65,3 @@ At the end of this process, you should have the following information: * A list of existing IP address allocations used in the environment. You will later exclude these addresses from allocation pools available for the new control plane services. 
- - - - diff --git a/docs_user/modules/proc_retrieving-topology-specific-service-configuration.adoc b/docs_user/modules/proc_retrieving-topology-specific-service-configuration.adoc index eb8f3a966..14b1ac1fb 100644 --- a/docs_user/modules/proc_retrieving-topology-specific-service-configuration.adoc +++ b/docs_user/modules/proc_retrieving-topology-specific-service-configuration.adoc @@ -20,7 +20,7 @@ SOURCE_DB_ROOT_PASSWORD=$(cat ~/tripleo-standalone-passwords.yaml | grep ' Mysql .Procedure -. Export shell variables for the following outputs to compare it with post-adoption values later on: +. Export shell variables for the following outputs to compare it with post-adoption values later on: Test connection to the original database: //kgilliga: Are these two separate actions, or are both happening in the following command? + diff --git a/docs_user/modules/proc_reusing-existing-subnet-ranges.adoc b/docs_user/modules/proc_reusing-existing-subnet-ranges.adoc index 2f0645183..56f75337a 100644 --- a/docs_user/modules/proc_reusing-existing-subnet-ranges.adoc +++ b/docs_user/modules/proc_reusing-existing-subnet-ranges.adoc @@ -22,5 +22,4 @@ overlap with the new allocation pools configured for {rhos_prev_long} control pl If you are especially constrained by the size of the existing subnet, you may have to apply elaborate exclusion rules when defining allocation pools for the -new control plane services. For more information, see - +new control plane services. For more information, see diff --git a/docs_user/modules/proc_stopping-openstack-services.adoc b/docs_user/modules/proc_stopping-openstack-services.adoc index 04c69b83b..941c91534 100644 --- a/docs_user/modules/proc_stopping-openstack-services.adoc +++ b/docs_user/modules/proc_stopping-openstack-services.adoc @@ -23,7 +23,7 @@ Note that you should not stop the infrastructure management services yet, such a .Prerequisites -* Confirm that there no long-running operations that require the services you plan to stop. +* Confirm that there no long-running operations that require the services you plan to stop. * Ensure that there are no ongoing instance live migrations, volume migrations (online or offline), volume creation, backup restore, attaching, detaching, and so on. + diff --git a/docs_user/modules/proc_using-new-subnet-ranges.adoc b/docs_user/modules/proc_using-new-subnet-ranges.adoc index ebe9b08ea..507174422 100644 --- a/docs_user/modules/proc_using-new-subnet-ranges.adoc +++ b/docs_user/modules/proc_using-new-subnet-ranges.adoc @@ -148,4 +148,3 @@ setting `edpm_network_config_update: true` to enforce the changes. Note that the examples above are incomplete and should be incorporated into your general configuration. 
- diff --git a/docs_user/modules/proc_verifying-the-image-service-adoption.adoc b/docs_user/modules/proc_verifying-the-image-service-adoption.adoc index 484aecb1f..651ff7460 100644 --- a/docs_user/modules/proc_verifying-the-image-service-adoption.adoc +++ b/docs_user/modules/proc_verifying-the-image-service-adoption.adoc @@ -128,4 +128,4 @@ sh-4.4$ rbd -p images ls 46a3eac1-7224-40bc-9083-f2f0cd122ba4 c3158cad-d50b-452f-bec1-f250562f5c1f ---- -endif::[] \ No newline at end of file +endif::[] diff --git a/tests/roles/development_environment/files/pre_launch.bash b/tests/roles/development_environment/files/pre_launch.bash index 88dcdb070..e035ff0ec 100755 --- a/tests/roles/development_environment/files/pre_launch.bash +++ b/tests/roles/development_environment/files/pre_launch.bash @@ -86,4 +86,3 @@ fi if ${BASH_ALIASES[openstack]} volume show disk -f json | jq -r '.status' | grep -q available ; then ${BASH_ALIASES[openstack]} server add volume test disk fi -