From e7e832d1c1f158d40a6464ebad4dcaad14b0aac2 Mon Sep 17 00:00:00 2001 From: Katie Gilligan Date: Mon, 29 Apr 2024 13:16:31 -0400 Subject: [PATCH 1/3] restructuring image service section --- ...ting-openstack-control-plane-services.adoc | 2 +- .../assembly_adopting-the-image-service.adoc | 218 +++++++ ...openstack-control-plane-configuration.adoc | 2 +- ...ge-service-with-block-storage-backend.adoc | 93 +++ ...mage-service-with-nfs-ganesha-backend.adoc | 211 ++++++ ...e-service-with-object-storage-backend.adoc | 89 +++ .../proc_adopting-the-image-service.adoc | 598 ------------------ 7 files changed, 613 insertions(+), 600 deletions(-) create mode 100644 docs_user/assemblies/assembly_adopting-the-image-service.adoc create mode 100644 docs_user/modules/proc_adopting-image-service-with-block-storage-backend.adoc create mode 100644 docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc create mode 100644 docs_user/modules/proc_adopting-image-service-with-object-storage-backend.adoc delete mode 100644 docs_user/modules/proc_adopting-the-image-service.adoc diff --git a/docs_user/assemblies/assembly_adopting-openstack-control-plane-services.adoc b/docs_user/assemblies/assembly_adopting-openstack-control-plane-services.adoc index 50af9d767..add2cb956 100644 --- a/docs_user/assemblies/assembly_adopting-openstack-control-plane-services.adoc +++ b/docs_user/assemblies/assembly_adopting-openstack-control-plane-services.adoc @@ -16,7 +16,7 @@ include::../modules/proc_adopting-the-networking-service.adoc[leveloffset=+1] include::../modules/proc_adopting-the-object-storage-service.adoc[leveloffset=+1] -include::../modules/proc_adopting-the-image-service.adoc[leveloffset=+1] +include::../assemblies/assembly_adopting-the-image-service.adoc[leveloffset=+1] include::../modules/proc_adopting-the-placement-service.adoc[leveloffset=+1] diff --git a/docs_user/assemblies/assembly_adopting-the-image-service.adoc b/docs_user/assemblies/assembly_adopting-the-image-service.adoc new file mode 100644 index 000000000..af9a59ffb --- /dev/null +++ b/docs_user/assemblies/assembly_adopting-the-image-service.adoc @@ -0,0 +1,218 @@ +[id="adopting-the-image-service_{context}"] + +:context: image-service +//Check xref context."Reviewing the OpenStack configuration" xref does not work. + += Adopting the {image_service} + +Adopting {image_service_first_ref} means that an existing `OpenStackControlPlane` custom resource (CR), where {image_service} +is supposed to be disabled, should be patched to start the service with the +configuration parameters provided by the source environment. + +When the procedure is over, the expectation is to see the `GlanceAPI` service +up and running: the {identity_service} endpoints are updated and the same backend of the source Cloud is available. If the conditions above are met, the adoption is considered concluded. + +This guide also assumes that: + +* A `TripleO` environment (the source Cloud) is running on one side. +* A `SNO` / `CodeReadyContainers` is running on the other side. +* (optional) An internal/external `Ceph` cluster is reachable by both `crc` and `TripleO`. + +ifeval::["{build}" != "downstream"] +//This link goes to a 404. Do we need this text downstream? +As already done for https://github.com/openstack-k8s-operators/data-plane-adoption/blob/main/keystone_adoption.md[Keystone], the Glance Adoption follows the same pattern. 
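+
+Before you patch anything, you can confirm that the {image_service} is still disabled in the existing control plane; a minimal sketch (assuming the `OpenStackControlPlane` CR is named `openstack`, as in the rest of this guide):
+
+----
+oc get openstackcontrolplane openstack -o jsonpath='{.spec.glance.enabled}'
+----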
+endif::[] + +.Using Ceph storage backend + +If a Ceph backend is used, the `customServiceConfig` parameter should +be used to inject the right configuration to the `GlanceAPI` instance. + +Make sure the Ceph-related secret (`ceph-conf-files`) was created in +the `openstack` namespace and that the `extraMounts` property of the +`OpenStackControlPlane` CR has been configured properly. These tasks +are described in an earlier Adoption step xref:configuring-a-ceph-backend_migrating-databases[Configuring a Ceph backend]. + +---- +cat << EOF > glance_patch.yaml +spec: + glance: + enabled: true + template: + databaseInstance: openstack + customServiceConfig: | + [DEFAULT] + enabled_backends=default_backend:rbd + [glance_store] + default_backend=default_backend + [default_backend] + rbd_store_ceph_conf=/etc/ceph/ceph.conf + rbd_store_user=openstack + rbd_store_pool=images + store_description=Ceph glance store backend. + storageClass: "local-storage" + storageRequest: 10G + glanceAPIs: + default: + replicas: 1 + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + networkAttachments: + - storage +EOF +---- + +If you have previously backup your {OpenStackShort} services configuration file from the old environment, you can use os-diff to compare and make sure the configuration is correct. +For more information, see xref:reviewing-the-openstack-control-plane-configuration_adopt-control-plane[Reviewing the OpenStack control plane configuration]. + +---- +pushd os-diff +./os-diff cdiff --service glance -c /tmp/collect_tripleo_configs/glance/etc/glance/glance-api.conf -o glance_patch.yaml +---- + +This will produce the difference between both ini configuration files. + +Patch OpenStackControlPlane to deploy {image_service} with Ceph backend: + +---- +oc patch openstackcontrolplane openstack --type=merge --patch-file glance_patch.yaml +---- + +.Verification + +* Test the glance service from the {OpenStackShort} CLI. + +You can compare and make sure the configuration has been correctly applied to the glance pods by running + +---- +./os-diff cdiff --service glance -c /etc/glance/glance.conf.d/02-config.conf -o glance_patch.yaml --frompod -p glance-api +---- + +If no line appear, then the configuration is correctly done. + +Inspect the resulting glance pods: + +---- +GLANCE_POD=`oc get pod |grep glance-default-external-0 | cut -f 1 -d' '` +oc exec -t $GLANCE_POD -c glance-api -- cat /etc/glance/glance.conf.d/02-config.conf + +[DEFAULT] +enabled_backends=default_backend:rbd +[glance_store] +default_backend=default_backend +[default_backend] +rbd_store_ceph_conf=/etc/ceph/ceph.conf +rbd_store_user=openstack +rbd_store_pool=images +store_description=Ceph glance store backend. + +oc exec -t $GLANCE_POD -c glance-api -- ls /etc/ceph +ceph.client.openstack.keyring +ceph.conf +---- + +Ceph secrets are properly mounted, at this point let's move to the {OpenStackShort} +CLI and check the service is active and the endpoints are properly updated. 
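+
+The `(openstack)$` prompt in the following verification steps assumes a client alias; a minimal sketch (the `openstackclient` pod is the same one used in the image upload example later in this section):
+
+----
+alias openstack="oc exec -t openstackclient -- openstack"
+----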
+ +---- +(openstack)$ service list | grep image + +| fc52dbffef36434d906eeb99adfc6186 | glance | image | + +(openstack)$ endpoint list | grep image + +| 569ed81064f84d4a91e0d2d807e4c1f1 | regionOne | glance | image | True | internal | http://glance-internal-openstack.apps-crc.testing | +| 5843fae70cba4e73b29d4aff3e8b616c | regionOne | glance | image | True | public | http://glance-public-openstack.apps-crc.testing | +| 709859219bc24ab9ac548eab74ad4dd5 | regionOne | glance | image | True | admin | http://glance-admin-openstack.apps-crc.testing | +---- + +Check that the images that you previously listed in the source Cloud are available in the adopted service: + +---- +(openstack)$ image list ++--------------------------------------+--------+--------+ +| ID | Name | Status | ++--------------------------------------+--------+--------+ +| c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active | ++--------------------------------------+--------+--------+ +---- + +* Image upload. +You can test that an image can be created on the adopted service. + +---- +(openstack)$ alias openstack="oc exec -t openstackclient -- openstack" +(openstack)$ curl -L -o /tmp/cirros-0.5.2-x86_64-disk.img http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img + qemu-img convert -O raw /tmp/cirros-0.5.2-x86_64-disk.img /tmp/cirros-0.5.2-x86_64-disk.img.raw + openstack image create --container-format bare --disk-format raw --file /tmp/cirros-0.5.2-x86_64-disk.img.raw cirros2 + openstack image list + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 273 100 273 0 0 1525 0 --:--:-- --:--:-- --:--:-- 1533 + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 +100 15.5M 100 15.5M 0 0 17.4M 0 --:--:-- --:--:-- --:--:-- 17.4M + ++------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ +| Field | Value | ++------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ +| container_format | bare | +| created_at | 2023-01-31T21:12:56Z | +| disk_format | raw | +| file | /v2/images/46a3eac1-7224-40bc-9083-f2f0cd122ba4/file | +| id | 46a3eac1-7224-40bc-9083-f2f0cd122ba4 | +| min_disk | 0 | +| min_ram | 0 | +| name | cirros | +| owner | 9f7e8fdc50f34b658cfaee9c48e5e12d | +| properties | os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/cirros', owner_specified.openstack.sha256='' | +| protected | False | +| schema | /v2/schemas/image | +| status | queued | +| tags | | +| updated_at | 2023-01-31T21:12:56Z | +| visibility | shared | ++------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ + ++--------------------------------------+--------+--------+ +| ID | Name | Status | ++--------------------------------------+--------+--------+ +| 46a3eac1-7224-40bc-9083-f2f0cd122ba4 | cirros2| active | +| c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active | ++--------------------------------------+--------+--------+ + + +(openstack)$ oc rsh ceph +sh-4.4$ ceph -s +r cluster: + id: 432d9a34-9cee-4109-b705-0c59e8973983 + health: HEALTH_OK + + services: + mon: 1 daemons, quorum a (age 4h) + mgr: a(active, since 4h) + osd: 1 osds: 1 up (since 4h), 1 in (since 4h) + + data: + pools: 5 pools, 160 pgs + objects: 46 objects, 224 MiB + usage: 
247 MiB used, 6.8 GiB / 7.0 GiB avail
    pgs:     160 active+clean

sh-4.4$ rbd -p images ls
46a3eac1-7224-40bc-9083-f2f0cd122ba4
c3158cad-d50b-452f-bec1-f250562f5c1f
----

include::../modules/proc_adopting-image-service-with-object-storage-backend.adoc[leveloffset=+1]

include::../modules/proc_adopting-image-service-with-block-storage-backend.adoc[leveloffset=+1]

include::../modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/docs_user/assemblies/assembly_reviewing-the-openstack-control-plane-configuration.adoc b/docs_user/assemblies/assembly_reviewing-the-openstack-control-plane-configuration.adoc
index 785700714..fa552a425 100644
--- a/docs_user/assemblies/assembly_reviewing-the-openstack-control-plane-configuration.adoc
+++ b/docs_user/assemblies/assembly_reviewing-the-openstack-control-plane-configuration.adoc
@@ -2,7 +2,7 @@

 :context: reviewing-configuration

-= Reviewing the {rhos_prev_long} control plane configruation
+= Reviewing the {rhos_prev_long} control plane configuration

 Before starting the adoption workflow, pull the configuration from the {rhos_prev_long} services and {OpenStackPreviousInstaller} on your file system to back up the configuration files. You can then use the files later, during the configuration of the adopted services, and for the record to compare and make sure nothing has been missed or misconfigured.

diff --git a/docs_user/modules/proc_adopting-image-service-with-block-storage-backend.adoc b/docs_user/modules/proc_adopting-image-service-with-block-storage-backend.adoc
new file mode 100644
index 000000000..6a79f0737
--- /dev/null
+++ b/docs_user/modules/proc_adopting-image-service-with-block-storage-backend.adoc
@@ -0,0 +1,93 @@
+[id="adopting-image-service-with-block-storage-backend_{context}"]
+
+= Adopting the {image_service} that is deployed with a {block_storage} backend
+
+Adopt the {image_service_first_ref} that you deployed with a {block_storage_first_ref} backend. When {image_service} is deployed with {block_storage} as a backend in the {rhos_prev_long} environment based on {OpenStackPreviousInstaller}, the control plane `glanceAPI` instance is deployed with the following configuration:
+
+----
+..
+spec
+  glance:
+    ...
+    customServiceConfig: |
+      [DEFAULT]
+      enabled_backends = default_backend:cinder
+      [glance_store]
+      default_backend = default_backend
+      [default_backend]
+      rootwrap_config = /etc/glance/rootwrap.conf
+      description = Default cinder backend
+      cinder_store_auth_address = {{ .KeystoneInternalURL }}
+      cinder_store_user_name = {{ .ServiceUser }}
+      cinder_store_password = {{ .ServicePassword }}
+      cinder_store_project_name = service
+      cinder_catalog_info = volumev3::internalURL
+      cinder_use_multipath = true
+----
+
+.Prerequisites
+
+* Previous Adoption steps completed. Notably, MariaDB, Keystone and Barbican
+should be already adopted.
+
+.Procedure
+
+. Write the patch manifest into a file, for example `glance_cinder.patch`.
+For example: ++ +---- +spec: + glance: + enabled: true + apiOverride: + route: {} + template: + databaseInstance: openstack + storageClass: "local-storage" + storageRequest: 10G + customServiceConfig: | + [DEFAULT] + enabled_backends = default_backend:cinder + [glance_store] + default_backend = default_backend + [default_backend] + rootwrap_config = /etc/glance/rootwrap.conf + description = Default cinder backend + cinder_store_auth_address = {{ .KeystoneInternalURL }} + cinder_store_user_name = {{ .ServiceUser }} + cinder_store_password = {{ .ServicePassword }} + cinder_store_project_name = service + cinder_catalog_info = volumev3::internalURL + cinder_use_multipath = true + glanceAPIs: + default: + replicas: 1 + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + networkAttachments: + - storage +---- ++ +Having {block_storage} as a backend establishes a dependency between the two services, and any deployed `GlanceAPI` instance would not work if the {image_service} is configured with {block_storage} that is still not available in the `OpenStackControlPlane`. +Once {block_storage}, and in particular `CinderVolume`, has been adopted through the procedure described in <>, it is possible to proceed with the `GlanceAPI` adoption. + +. Verify that `CinderVolume` is available: ++ +---- +$ oc get pod -l component=cinder-volume | grep Running +cinder-volume-75cb47f65-92rxq 3/3 Running 0 +---- + +. Patch the `GlanceAPI` service deployed in the control plane context: ++ +---- +oc patch openstackcontrolplane openstack --type=merge --patch-file=glance_cinder.patch +---- diff --git a/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc b/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc new file mode 100644 index 000000000..081fe75b9 --- /dev/null +++ b/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc @@ -0,0 +1,211 @@ +[id="adopting-image-service-with-nfs-ganesha-backend_{context}"] + += Adopting the {image_service} that is deployed with an NFS Ganesha backend + +Adopt the {image_service_first_ref} that you deployed with an NFS Ganesha backend. When {image_service} is deployed with NFS Ganesha as a backend in the {rhos_prev_long} environment based on {OpenStackPreviousInstaller}, the control plane `glanceAPI` instance is deployed with the following configuration: + +.Prerequisites + +* Previous Adoption steps completed. Notably, MariaDB, Keystone and Barbican +should be already adopted. + +.Procedure + +When the source Cloud based on TripleO uses the {image_service} with a NFS Ganesha backend, before +patching the OpenStackControlPlane to deploy the {image_service} it is important to validate +a few networking related prerequisites. +In the source cloud, verify the NFS parameters used by the overcloud to configure +the {image_service} backend. 
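A quick way to locate these overrides is to search the templates and your custom environment files; a sketch (the `~/templates` directory is an assumed location for custom environment files):

----
grep -rn 'GlanceNfs' /usr/share/openstack-tripleo-heat-templates/environments/ ~/templates/
----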
+In particular, find among the TripleO heat templates the following variables that are usually an override of the default content provided by +`/usr/share/openstack-tripleo-heat-templates/environments/storage/glance-nfs.yaml`[glance-nfs.yaml].: + +--- + +**GlanceBackend**: file + +**GlanceNfsEnabled**: true + +**GlanceNfsShare**: 192.168.24.1:/var/nfs + +--- + +In the example above, as the first variable shows, unlike Cinder, the {image_service} has no +notion of NFS backend: the `File` driver is used in this scenario, and behind the +scenes, the `filesystem_store_datadir` which usually points to `/var/lib/glance/images/` +is mapped to the export value provided by the `GlanceNfsShare` variable. +If the `GlanceNfsShare` is not exported through a network that is supposed to be +propagated to the adopted {rhos_prev_long} control plane, an extra action is required +by the human administrator, who must stop the `nfs-server` and remap the export +to the `storage` network. This action usually happens when the {image_service} is +stopped in the source controller nodes. +In the podified control plane, as per the +(https://github.com/openstack-k8s-operators/docs/blob/main/images/network_diagram.jpg)[network isolation diagram], +the {image_service} is attached to the Storage network, propagated via the associated +`NetworkAttachmentsDefinition` CR, and the resulting Pods have already the right +permissions to handle the Image Service traffic through this network. +In a deployed {OpenStackShort} control plane, you can verify that the network mapping +matches with what has been deployed in the TripleO based environment by checking +both the `NodeNetworkConfigPolicy` (`nncp`) and the `NetworkAttachmentDefinition` +(`net-attach-def`) with the following commands: + +``` +$ oc get nncp +NAME STATUS REASON +enp6s0-crc-8cf2w-master-0 Available SuccessfullyConfigured + +$ oc get net-attach-def +NAME +ctlplane +internalapi +storage +tenant + +$ oc get ipaddresspool -n metallb-system +NAME AUTO ASSIGN AVOID BUGGY IPS ADDRESSES +ctlplane true false ["192.168.122.80-192.168.122.90"] +internalapi true false ["172.17.0.80-172.17.0.90"] +storage true false ["172.18.0.80-172.18.0.90"] +tenant true false ["172.19.0.80-172.19.0.90"] +``` + +The above represents an example of the output that should be checked in the +{OpenShift} environment to make sure there are no issues with the propagated +networks. + +The following steps assume that: + +1. the Storage network has been propagated to the {OpenStackShort} control plane +2. The {image_service} is able to reach the Storage network and connect to the nfs-server + through the port `2049`. + +If the above conditions are met, it is possible to adopt the {image_service} +and create a new `default` `GlanceAPI` instance connected with the existing +NFS share. 
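Before writing the patch, you can optionally probe the export from a host that is attached to the Storage network; a minimal sketch (`172.17.3.20` is the example `nfs-server` address used in the patch below):

----
# Plain bash TCP probe; succeeds only if the nfs-server answers on port 2049
timeout 5 bash -c '</dev/tcp/172.17.3.20/2049' && echo "nfs-server reachable on port 2049"
----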
+ +---- +cat << EOF > glance_nfs_patch.yaml + +spec: + extraMounts: + - extraVol: + - extraVolType: Nfs + mounts: + - mountPath: /var/lib/glance/images + name: nfs + propagation: + - Glance + volumes: + - name: nfs + nfs: + path: /var/nfs + server: 172.17.3.20 + name: r1 + region: r1 + glance: + enabled: true + template: + databaseInstance: openstack + customServiceConfig: | + [DEFAULT] + enabled_backends = default_backend:file + [glance_store] + default_backend = default_backend + [default_backend] + filesystem_store_datadir = /var/lib/glance/images/ + storageClass: "local-storage" + storageRequest: 10G + glanceAPIs: + default: + replicas: 1 + type: single + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + networkAttachments: + - storage +EOF +---- + +[NOTE] +Replace in `glance_nfs_patch.yaml` the `nfs/server` ip address with the IP used +to reach the `nfs-server` and make sure the `nfs/path` points to the exported +path in the `nfs-server`. + +Patch `OpenStackControlPlane` to deploy {image_service} with a NFS backend: + +---- +oc patch openstackcontrolplane openstack --type=merge --patch-file glance_nfs_patch.yaml +---- + +When GlanceAPI is active, you can see a single API instance: + +``` +$ oc get pods -l service=glance +NAME READY STATUS RESTARTS +glance-default-single-0 3/3 Running 0 +``` + +and the description of the pod must report: + +``` +Mounts: +... + nfs: + Type: NFS (an NFS mount that lasts the lifetime of a pod) + Server: {{ server ip address }} + Path: {{ nfs export path }} + ReadOnly: false +... +``` + +It is also possible to double check the mountpoint by running the following: + +``` +oc rsh -c glance-api glance-default-single-0 + +sh-5.1# mount +... +... +{{ ip address }}:/var/nfs on /var/lib/glance/images type nfs4 (rw,relatime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.18.0.5,local_lock=none,addr=172.18.0.5) +... +... +``` + +You can run an `openstack image create` command and double check, on the NFS +node, the uuid has been created in the exported directory. + +For example: + +``` +$ oc rsh openstackclient +$ openstack image list + +sh-5.1$ curl -L -o /tmp/cirros-0.5.2-x86_64-disk.img http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img +... +... + +sh-5.1$ openstack image create --container-format bare --disk-format raw --file /tmp/cirros-0.5.2-x86_64-disk.img cirros +... +... 
+ +sh-5.1$ openstack image list ++--------------------------------------+--------+--------+ +| ID | Name | Status | ++--------------------------------------+--------+--------+ +| 634482ca-4002-4a6d-b1d5-64502ad02630 | cirros | active | ++--------------------------------------+--------+--------+ +``` + +On the nfs-server node, the same `uuid` is in the exported `/var/nfs`: + +``` +$ ls /var/nfs/ +634482ca-4002-4a6d-b1d5-64502ad02630 +``` diff --git a/docs_user/modules/proc_adopting-image-service-with-object-storage-backend.adoc b/docs_user/modules/proc_adopting-image-service-with-object-storage-backend.adoc new file mode 100644 index 000000000..d3024eef7 --- /dev/null +++ b/docs_user/modules/proc_adopting-image-service-with-object-storage-backend.adoc @@ -0,0 +1,89 @@ +[id="adopting-image-service-with-object-storage-backend_{context}"] + += Adopting the {image_service} that is deployed with a {object_storage} backend + +Adopt the {image_service_first_ref} that you deployed with an {object_storage_first_ref} backend. When {image_service} is deployed with {object_storage_first_ref} as a backend in the {rhos_prev_long} environment based on {OpenStackPreviousInstaller}, the control plane `glanceAPI` instance is deployed with the following configuration: + +---- +.. +spec + glance: + ... + customServiceConfig: | + [DEFAULT] + enabled_backends = default_backend:swift + [glance_store] + default_backend = default_backend + [default_backend] + swift_store_create_container_on_put = True + swift_store_auth_version = 3 + swift_store_auth_address = {{ .KeystoneInternalURL }} + swift_store_endpoint_type = internalURL + swift_store_user = service:glance + swift_store_key = {{ .ServicePassword }} +---- + +.Prerequisites + +* Previous Adoption steps completed. Notably, MariaDB, Keystone and Barbican +should be already adopted. + +.Procedure + +. Write the patch manifest into a file, for example `glance_swift.patch`. +For example: ++ +---- +spec: + glance: + enabled: true + apiOverride: + route: {} + template: + databaseInstance: openstack + storageClass: "local-storage" + storageRequest: 10G + customServiceConfig: | + [DEFAULT] + enabled_backends = default_backend:swift + [glance_store] + default_backend = default_backend + [default_backend] + swift_store_create_container_on_put = True + swift_store_auth_version = 3 + swift_store_auth_address = {{ .KeystoneInternalURL }} + swift_store_endpoint_type = internalURL + swift_store_user = service:glance + swift_store_key = {{ .ServicePassword }} + glanceAPIs: + default: + replicas: 1 + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + networkAttachments: + - storage +---- ++ +Having {object_storage} as a backend establishes a dependency between the two services, and any deployed `GlanceAPI` instance would not work if {image_service} is configured with {object_storage} that is still not available in the `OpenStackControlPlane`. +Once {object_storage}, and in particular `SwiftProxy`, has been adopted through the procedure described in <>, it is possible to proceed with the `GlanceAPI` adoption. + +. Verify that `SwiftProxy` is available: ++ +---- +$ oc get pod -l component=swift-proxy | grep Running +swift-proxy-75cb47f65-92rxq 3/3 Running 0 +---- + +. 
Patch the `GlanceAPI` service deployed in the control plane context: ++ +---- +$ oc patch openstackcontrolplane openstack --type=merge --patch-file=glance_swift.patch +---- diff --git a/docs_user/modules/proc_adopting-the-image-service.adoc b/docs_user/modules/proc_adopting-the-image-service.adoc deleted file mode 100644 index 12cf50532..000000000 --- a/docs_user/modules/proc_adopting-the-image-service.adoc +++ /dev/null @@ -1,598 +0,0 @@ -[id="adopting-the-image-service_{context}"] - -//Check xref context. -//Need to revisit this procedure. It will likely need to be split into multiple procedures: one for each backend. Do the verification steps apply to each backend? - -= Adopting the {image_service} - -Adopting {image_service_first_ref} means that an existing `OpenStackControlPlane` custom resource (CR), where {image_service} -is supposed to be disabled, should be patched to start the service with the -configuration parameters provided by the source environment. - -When the procedure is over, the expectation is to see the `GlanceAPI` service -up and running: the {identity_service} endpoints are updated and the same backend of the source Cloud is available. If the conditions above are met, the adoption is considered concluded. - -This guide also assumes that: - -. A `TripleO` environment (the source Cloud) is running on one side; -. A `SNO` / `CodeReadyContainers` is running on the other side; -. (optional) an internal/external `Ceph` cluster is reachable by both `crc` and `TripleO` - -.Prerequisites - -* Previous Adoption steps completed. Notably, MariaDB, Keystone and Barbican -should be already adopted. - -.Procedure -//This link goes to a 404. Do we need this text downstream? -As already done for https://github.com/openstack-k8s-operators/data-plane-adoption/blob/main/keystone_adoption.md[Keystone], the Glance Adoption follows the same pattern. - -.Using {object_storage} backend - -When {image_service} is deployed with {object_storage_first_ref} as a backend in the source environment based -on TripleO, the control plane `glanceAPI` instance is deployed with the following -configuration: - ----- -.. -spec - glance: - ... - customServiceConfig: | - [DEFAULT] - enabled_backends = default_backend:swift - [glance_store] - default_backend = default_backend - [default_backend] - swift_store_create_container_on_put = True - swift_store_auth_version = 3 - swift_store_auth_address = {{ .KeystoneInternalURL }} - swift_store_endpoint_type = internalURL - swift_store_user = service:glance - swift_store_key = {{ .ServicePassword }} ----- - -It is recommended to write the patch manifest into a file, for example `glance_swift.patch`. 
-For example, the {image_service} deployment with a {object_storage} backend would look like this: - ----- -spec: - glance: - enabled: true - apiOverride: - route: {} - template: - databaseInstance: openstack - storageClass: "local-storage" - storageRequest: 10G - customServiceConfig: | - [DEFAULT] - enabled_backends = default_backend:swift - [glance_store] - default_backend = default_backend - [default_backend] - swift_store_create_container_on_put = True - swift_store_auth_version = 3 - swift_store_auth_address = {{ .KeystoneInternalURL }} - swift_store_endpoint_type = internalURL - swift_store_user = service:glance - swift_store_key = {{ .ServicePassword }} - glanceAPIs: - default: - replicas: 1 - override: - service: - internal: - metadata: - annotations: - metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 - spec: - type: LoadBalancer - networkAttachments: - - storage ----- - -Having {object_storage} as a backend establishes a dependency between the two services, -and any deployed `GlanceAPI` instance would **not work** if {image_service} is configured with {object_storage} that is still not available in the `OpenStackControlPlane`. -Once {object_storage}, and in particular `SwiftProxy`, has been adopted through the -procedure described in <>, it is possible -to proceed with the `GlanceAPI` adoption. - -Verify that `SwiftProxy` is available with the following command: - ----- -$ oc get pod -l component=swift-proxy | grep Running -swift-proxy-75cb47f65-92rxq 3/3 Running 0 ----- - -If the output is similar to the above, it is possible to move forward and patch -the `GlanceAPI` service deployed in the control plane context with the following -command: - ----- -oc patch openstackcontrolplane openstack --type=merge --patch-file=glance_swift.patch ----- - -.Using Cinder backend - -When the {image_service} is deployed with Cinder as a backend in the source environment based -on TripleO, the control plane `glanceAPI` instance should be deployed with the following -configuration: - ----- -.. -spec - glance: - ... - customServiceConfig: | - [DEFAULT] - enabled_backends = default_backend:cinder - [glance_store] - default_backend = default_backend - [default_backend] - rootwrap_config = /etc/glance/rootwrap.conf - description = Default cinder backend - cinder_store_auth_address = {{ .KeystoneInternalURL }} - cinder_store_user_name = {{ .ServiceUser }} - cinder_store_password = {{ .ServicePassword }} - cinder_store_project_name = service - cinder_catalog_info = volumev3::internalURL - cinder_use_multipath = true ----- - -It is recommended to write the patch manifest into a file, for example `glance_cinder.patch`. 
-For example, the {image_service} deployment with a Cinder backend would look like this: - ----- -spec: - glance: - enabled: true - apiOverride: - route: {} - template: - databaseInstance: openstack - storageClass: "local-storage" - storageRequest: 10G - customServiceConfig: | - [DEFAULT] - enabled_backends = default_backend:cinder - [glance_store] - default_backend = default_backend - [default_backend] - rootwrap_config = /etc/glance/rootwrap.conf - description = Default cinder backend - cinder_store_auth_address = {{ .KeystoneInternalURL }} - cinder_store_user_name = {{ .ServiceUser }} - cinder_store_password = {{ .ServicePassword }} - cinder_store_project_name = service - cinder_catalog_info = volumev3::internalURL - cinder_use_multipath = true - glanceAPIs: - default: - replicas: 1 - override: - service: - internal: - metadata: - annotations: - metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 - spec: - type: LoadBalancer - networkAttachments: - - storage ----- - -Having `Cinder` as a backend establishes a dependency between the two services, -and any deployed `GlanceAPI` instance would **not work** if the {image_service} is -configured with `Cinder` that is still not available in the `OpenStackControlPlane`. -Once Cinder, and in particular `CinderVolume`, has been adopted through the -procedure described in <>, it is possible -to proceed with the `GlanceAPI` adoption. - -Verify that `CinderVolume` is available with the following command: - ----- -$ oc get pod -l component=cinder-volume | grep Running -cinder-volume-75cb47f65-92rxq 3/3 Running 0 ----- - -If the output is similar to the above, it is possible to move forward and patch -the `GlanceAPI` service deployed in the control plane context with the following -command: - ----- -oc patch openstackcontrolplane openstack --type=merge --patch-file=glance_cinder.patch ----- - -.Using NFS backend - -When the source Cloud based on TripleO uses the {image_service} with a NFS backend, before -patching the OpenStackControlPlane to deploy the {image_service} it is important to validate -a few networking related prerequisites. -In the source cloud, verify the NFS parameters used by the overcloud to configure -the {image_service} backend. -In particular, find among the TripleO heat templates the following variables that are usually an override of the default content provided by -`/usr/share/openstack-tripleo-heat-templates/environments/storage/glance-nfs.yaml`[glance-nfs.yaml].: - ---- - -**GlanceBackend**: file - -**GlanceNfsEnabled**: true - -**GlanceNfsShare**: 192.168.24.1:/var/nfs - ---- - -In the example above, as the first variable shows, unlike Cinder, the {image_service} has no -notion of NFS backend: the `File` driver is used in this scenario, and behind the -scenes, the `filesystem_store_datadir` which usually points to `/var/lib/glance/images/` -is mapped to the export value provided by the `GlanceNfsShare` variable. -If the `GlanceNfsShare` is not exported through a network that is supposed to be -propagated to the adopted {rhos_prev_long} control plane, an extra action is required -by the human administrator, who must stop the `nfs-server` and remap the export -to the `storage` network. This action usually happens when the {image_service} is -stopped in the source controller nodes. 
-In the podified control plane, as per the -(https://github.com/openstack-k8s-operators/docs/blob/main/images/network_diagram.jpg)[network isolation diagram], -the {image_service} is attached to the Storage network, propagated via the associated -`NetworkAttachmentsDefinition` CR, and the resulting Pods have already the right -permissions to handle the Image Service traffic through this network. -In a deployed {OpenStackShort} control plane, you can verify that the network mapping -matches with what has been deployed in the TripleO based environment by checking -both the `NodeNetworkConfigPolicy` (`nncp`) and the `NetworkAttachmentDefinition` -(`net-attach-def`) with the following commands: - -``` -$ oc get nncp -NAME STATUS REASON -enp6s0-crc-8cf2w-master-0 Available SuccessfullyConfigured - -$ oc get net-attach-def -NAME -ctlplane -internalapi -storage -tenant - -$ oc get ipaddresspool -n metallb-system -NAME AUTO ASSIGN AVOID BUGGY IPS ADDRESSES -ctlplane true false ["192.168.122.80-192.168.122.90"] -internalapi true false ["172.17.0.80-172.17.0.90"] -storage true false ["172.18.0.80-172.18.0.90"] -tenant true false ["172.19.0.80-172.19.0.90"] -``` - -The above represents an example of the output that should be checked in the -{OpenShift} environment to make sure there are no issues with the propagated -networks. - -The following steps assume that: - -1. the Storage network has been propagated to the {OpenStackShort} control plane -2. The {image_service} is able to reach the Storage network and connect to the nfs-server - through the port `2049`. - -If the above conditions are met, it is possible to adopt the {image_service} -and create a new `default` `GlanceAPI` instance connected with the existing -NFS share. - ----- -cat << EOF > glance_nfs_patch.yaml - -spec: - extraMounts: - - extraVol: - - extraVolType: Nfs - mounts: - - mountPath: /var/lib/glance/images - name: nfs - propagation: - - Glance - volumes: - - name: nfs - nfs: - path: /var/nfs - server: 172.17.3.20 - name: r1 - region: r1 - glance: - enabled: true - template: - databaseInstance: openstack - customServiceConfig: | - [DEFAULT] - enabled_backends = default_backend:file - [glance_store] - default_backend = default_backend - [default_backend] - filesystem_store_datadir = /var/lib/glance/images/ - storageClass: "local-storage" - storageRequest: 10G - glanceAPIs: - default: - replicas: 1 - type: single - override: - service: - internal: - metadata: - annotations: - metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 - spec: - type: LoadBalancer - networkAttachments: - - storage -EOF ----- - -[NOTE] -Replace in `glance_nfs_patch.yaml` the `nfs/server` ip address with the IP used -to reach the `nfs-server` and make sure the `nfs/path` points to the exported -path in the `nfs-server`. - -Patch `OpenStackControlPlane` to deploy {image_service} with a NFS backend: - ----- -oc patch openstackcontrolplane openstack --type=merge --patch-file glance_nfs_patch.yaml ----- - -When GlanceAPI is active, you can see a single API instance: - -``` -$ oc get pods -l service=glance -NAME READY STATUS RESTARTS -glance-default-single-0 3/3 Running 0 -``` - -and the description of the pod must report: - -``` -Mounts: -... - nfs: - Type: NFS (an NFS mount that lasts the lifetime of a pod) - Server: {{ server ip address }} - Path: {{ nfs export path }} - ReadOnly: false -... 
-``` - -It is also possible to double check the mountpoint by running the following: - -``` -oc rsh -c glance-api glance-default-single-0 - -sh-5.1# mount -... -... -{{ ip address }}:/var/nfs on /var/lib/glance/images type nfs4 (rw,relatime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.18.0.5,local_lock=none,addr=172.18.0.5) -... -... -``` - -You can run an `openstack image create` command and double check, on the NFS -node, the uuid has been created in the exported directory. - -For example: - -``` -$ oc rsh openstackclient -$ openstack image list - -sh-5.1$ curl -L -o /tmp/cirros-0.5.2-x86_64-disk.img http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img -... -... - -sh-5.1$ openstack image create --container-format bare --disk-format raw --file /tmp/cirros-0.5.2-x86_64-disk.img cirros -... -... - -sh-5.1$ openstack image list -+--------------------------------------+--------+--------+ -| ID | Name | Status | -+--------------------------------------+--------+--------+ -| 634482ca-4002-4a6d-b1d5-64502ad02630 | cirros | active | -+--------------------------------------+--------+--------+ -``` - -On the nfs-server node, the same `uuid` is in the exported `/var/nfs`: - -``` -$ ls /var/nfs/ -634482ca-4002-4a6d-b1d5-64502ad02630 -``` - -.Using Ceph storage backend - -If a Ceph backend is used, the `customServiceConfig` parameter should -be used to inject the right configuration to the `GlanceAPI` instance. - -Make sure the Ceph-related secret (`ceph-conf-files`) was created in -the `openstack` namespace and that the `extraMounts` property of the -`OpenStackControlPlane` CR has been configured properly. These tasks -are described in an earlier Adoption step xref:configuring-a-ceph-backend_migrating-databases[Configuring a Ceph backend]. - ----- -cat << EOF > glance_patch.yaml -spec: - glance: - enabled: true - template: - databaseInstance: openstack - customServiceConfig: | - [DEFAULT] - enabled_backends=default_backend:rbd - [glance_store] - default_backend=default_backend - [default_backend] - rbd_store_ceph_conf=/etc/ceph/ceph.conf - rbd_store_user=openstack - rbd_store_pool=images - store_description=Ceph glance store backend. - storageClass: "local-storage" - storageRequest: 10G - glanceAPIs: - default: - replicas: 1 - override: - service: - internal: - metadata: - annotations: - metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 - spec: - type: LoadBalancer - networkAttachments: - - storage -EOF ----- - -If you have previously backup your {OpenStackShort} services configuration file from the old environment: -xref:reviewing-the-openstack-control-plane-configuration_{context}[Reviewing the OpenStack control plane configruation] you can use os-diff to compare and make sure the configuration is correct. - ----- -pushd os-diff -./os-diff cdiff --service glance -c /tmp/collect_tripleo_configs/glance/etc/glance/glance-api.conf -o glance_patch.yaml ----- - -This will produce the difference between both ini configuration files. - -Patch OpenStackControlPlane to deploy {image_service} with Ceph backend: - ----- -oc patch openstackcontrolplane openstack --type=merge --patch-file glance_patch.yaml ----- - -.Verification - -* Test the glance service from the {OpenStackShort} CLI. 
- -You can compare and make sure the configuration has been correctly applied to the glance pods by running - ----- -./os-diff cdiff --service glance -c /etc/glance/glance.conf.d/02-config.conf -o glance_patch.yaml --frompod -p glance-api ----- - -If no line appear, then the configuration is correctly done. - -Inspect the resulting glance pods: - ----- -GLANCE_POD=`oc get pod |grep glance-default-external-0 | cut -f 1 -d' '` -oc exec -t $GLANCE_POD -c glance-api -- cat /etc/glance/glance.conf.d/02-config.conf - -[DEFAULT] -enabled_backends=default_backend:rbd -[glance_store] -default_backend=default_backend -[default_backend] -rbd_store_ceph_conf=/etc/ceph/ceph.conf -rbd_store_user=openstack -rbd_store_pool=images -store_description=Ceph glance store backend. - -oc exec -t $GLANCE_POD -c glance-api -- ls /etc/ceph -ceph.client.openstack.keyring -ceph.conf ----- - -Ceph secrets are properly mounted, at this point let's move to the {OpenStackShort} -CLI and check the service is active and the endpoints are properly updated. - ----- -(openstack)$ service list | grep image - -| fc52dbffef36434d906eeb99adfc6186 | glance | image | - -(openstack)$ endpoint list | grep image - -| 569ed81064f84d4a91e0d2d807e4c1f1 | regionOne | glance | image | True | internal | http://glance-internal-openstack.apps-crc.testing | -| 5843fae70cba4e73b29d4aff3e8b616c | regionOne | glance | image | True | public | http://glance-public-openstack.apps-crc.testing | -| 709859219bc24ab9ac548eab74ad4dd5 | regionOne | glance | image | True | admin | http://glance-admin-openstack.apps-crc.testing | ----- - -Check that the images that you previously listed in the source Cloud are available in the adopted service: - ----- -(openstack)$ image list -+--------------------------------------+--------+--------+ -| ID | Name | Status | -+--------------------------------------+--------+--------+ -| c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active | -+--------------------------------------+--------+--------+ ----- - -* Image upload. -You can test that an image can be created on the adopted service. 
- ----- -(openstack)$ alias openstack="oc exec -t openstackclient -- openstack" -(openstack)$ curl -L -o /tmp/cirros-0.5.2-x86_64-disk.img http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img - qemu-img convert -O raw /tmp/cirros-0.5.2-x86_64-disk.img /tmp/cirros-0.5.2-x86_64-disk.img.raw - openstack image create --container-format bare --disk-format raw --file /tmp/cirros-0.5.2-x86_64-disk.img.raw cirros2 - openstack image list - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed -100 273 100 273 0 0 1525 0 --:--:-- --:--:-- --:--:-- 1533 - 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 -100 15.5M 100 15.5M 0 0 17.4M 0 --:--:-- --:--:-- --:--:-- 17.4M - -+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| Field | Value | -+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| container_format | bare | -| created_at | 2023-01-31T21:12:56Z | -| disk_format | raw | -| file | /v2/images/46a3eac1-7224-40bc-9083-f2f0cd122ba4/file | -| id | 46a3eac1-7224-40bc-9083-f2f0cd122ba4 | -| min_disk | 0 | -| min_ram | 0 | -| name | cirros | -| owner | 9f7e8fdc50f34b658cfaee9c48e5e12d | -| properties | os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/cirros', owner_specified.openstack.sha256='' | -| protected | False | -| schema | /v2/schemas/image | -| status | queued | -| tags | | -| updated_at | 2023-01-31T21:12:56Z | -| visibility | shared | -+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ - -+--------------------------------------+--------+--------+ -| ID | Name | Status | -+--------------------------------------+--------+--------+ -| 46a3eac1-7224-40bc-9083-f2f0cd122ba4 | cirros2| active | -| c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active | -+--------------------------------------+--------+--------+ - - -(openstack)$ oc rsh ceph -sh-4.4$ ceph -s -r cluster: - id: 432d9a34-9cee-4109-b705-0c59e8973983 - health: HEALTH_OK - - services: - mon: 1 daemons, quorum a (age 4h) - mgr: a(active, since 4h) - osd: 1 osds: 1 up (since 4h), 1 in (since 4h) - - data: - pools: 5 pools, 160 pgs - objects: 46 objects, 224 MiB - usage: 247 MiB used, 6.8 GiB / 7.0 GiB avail - pgs: 160 active+clean - -sh-4.4$ rbd -p images ls -46a3eac1-7224-40bc-9083-f2f0cd122ba4 -c3158cad-d50b-452f-bec1-f250562f5c1f ----- From e696f8671cee180ddac8bceb66aaf97cf2d4981a Mon Sep 17 00:00:00 2001 From: Katie Gilligan Date: Mon, 29 Apr 2024 14:53:12 -0400 Subject: [PATCH 2/3] restructuring the image service chapter --- .../assembly_adopting-the-image-service.adoc | 194 +----------------- ...pting-image-service-with-ceph-backend.adoc | 71 +++++++ ...mage-service-with-nfs-ganesha-backend.adoc | 121 +++++------ ..._verifying-the-image-service-adoption.adoc | 128 ++++++++++++ 4 files changed, 255 insertions(+), 259 deletions(-) create mode 100644 docs_user/modules/proc_adopting-image-service-with-ceph-backend.adoc create mode 100644 docs_user/modules/proc_verifying-the-image-service-adoption.adoc diff --git a/docs_user/assemblies/assembly_adopting-the-image-service.adoc b/docs_user/assemblies/assembly_adopting-the-image-service.adoc index af9a59ffb..d7aca4c3b 100644 --- 
a/docs_user/assemblies/assembly_adopting-the-image-service.adoc +++ b/docs_user/assemblies/assembly_adopting-the-image-service.adoc @@ -23,196 +23,12 @@ ifeval::["{build}" != "downstream"] As already done for https://github.com/openstack-k8s-operators/data-plane-adoption/blob/main/keystone_adoption.md[Keystone], the Glance Adoption follows the same pattern. endif::[] -.Using Ceph storage backend - -If a Ceph backend is used, the `customServiceConfig` parameter should -be used to inject the right configuration to the `GlanceAPI` instance. - -Make sure the Ceph-related secret (`ceph-conf-files`) was created in -the `openstack` namespace and that the `extraMounts` property of the -`OpenStackControlPlane` CR has been configured properly. These tasks -are described in an earlier Adoption step xref:configuring-a-ceph-backend_migrating-databases[Configuring a Ceph backend]. - ----- -cat << EOF > glance_patch.yaml -spec: - glance: - enabled: true - template: - databaseInstance: openstack - customServiceConfig: | - [DEFAULT] - enabled_backends=default_backend:rbd - [glance_store] - default_backend=default_backend - [default_backend] - rbd_store_ceph_conf=/etc/ceph/ceph.conf - rbd_store_user=openstack - rbd_store_pool=images - store_description=Ceph glance store backend. - storageClass: "local-storage" - storageRequest: 10G - glanceAPIs: - default: - replicas: 1 - override: - service: - internal: - metadata: - annotations: - metallb.universe.tf/address-pool: internalapi - metallb.universe.tf/allow-shared-ip: internalapi - metallb.universe.tf/loadBalancerIPs: 172.17.0.80 - spec: - type: LoadBalancer - networkAttachments: - - storage -EOF ----- - -If you have previously backup your {OpenStackShort} services configuration file from the old environment, you can use os-diff to compare and make sure the configuration is correct. -For more information, see xref:reviewing-the-openstack-control-plane-configuration_adopt-control-plane[Reviewing the OpenStack control plane configuration]. - ----- -pushd os-diff -./os-diff cdiff --service glance -c /tmp/collect_tripleo_configs/glance/etc/glance/glance-api.conf -o glance_patch.yaml ----- - -This will produce the difference between both ini configuration files. - -Patch OpenStackControlPlane to deploy {image_service} with Ceph backend: - ----- -oc patch openstackcontrolplane openstack --type=merge --patch-file glance_patch.yaml ----- - -.Verification - -* Test the glance service from the {OpenStackShort} CLI. - -You can compare and make sure the configuration has been correctly applied to the glance pods by running - ----- -./os-diff cdiff --service glance -c /etc/glance/glance.conf.d/02-config.conf -o glance_patch.yaml --frompod -p glance-api ----- - -If no line appear, then the configuration is correctly done. - -Inspect the resulting glance pods: - ----- -GLANCE_POD=`oc get pod |grep glance-default-external-0 | cut -f 1 -d' '` -oc exec -t $GLANCE_POD -c glance-api -- cat /etc/glance/glance.conf.d/02-config.conf - -[DEFAULT] -enabled_backends=default_backend:rbd -[glance_store] -default_backend=default_backend -[default_backend] -rbd_store_ceph_conf=/etc/ceph/ceph.conf -rbd_store_user=openstack -rbd_store_pool=images -store_description=Ceph glance store backend. - -oc exec -t $GLANCE_POD -c glance-api -- ls /etc/ceph -ceph.client.openstack.keyring -ceph.conf ----- - -Ceph secrets are properly mounted, at this point let's move to the {OpenStackShort} -CLI and check the service is active and the endpoints are properly updated. 
- ----- -(openstack)$ service list | grep image - -| fc52dbffef36434d906eeb99adfc6186 | glance | image | - -(openstack)$ endpoint list | grep image - -| 569ed81064f84d4a91e0d2d807e4c1f1 | regionOne | glance | image | True | internal | http://glance-internal-openstack.apps-crc.testing | -| 5843fae70cba4e73b29d4aff3e8b616c | regionOne | glance | image | True | public | http://glance-public-openstack.apps-crc.testing | -| 709859219bc24ab9ac548eab74ad4dd5 | regionOne | glance | image | True | admin | http://glance-admin-openstack.apps-crc.testing | ----- - -Check that the images that you previously listed in the source Cloud are available in the adopted service: - ----- -(openstack)$ image list -+--------------------------------------+--------+--------+ -| ID | Name | Status | -+--------------------------------------+--------+--------+ -| c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active | -+--------------------------------------+--------+--------+ ----- - -* Image upload. -You can test that an image can be created on the adopted service. - ----- -(openstack)$ alias openstack="oc exec -t openstackclient -- openstack" -(openstack)$ curl -L -o /tmp/cirros-0.5.2-x86_64-disk.img http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img - qemu-img convert -O raw /tmp/cirros-0.5.2-x86_64-disk.img /tmp/cirros-0.5.2-x86_64-disk.img.raw - openstack image create --container-format bare --disk-format raw --file /tmp/cirros-0.5.2-x86_64-disk.img.raw cirros2 - openstack image list - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed -100 273 100 273 0 0 1525 0 --:--:-- --:--:-- --:--:-- 1533 - 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 -100 15.5M 100 15.5M 0 0 17.4M 0 --:--:-- --:--:-- --:--:-- 17.4M - -+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| Field | Value | -+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| container_format | bare | -| created_at | 2023-01-31T21:12:56Z | -| disk_format | raw | -| file | /v2/images/46a3eac1-7224-40bc-9083-f2f0cd122ba4/file | -| id | 46a3eac1-7224-40bc-9083-f2f0cd122ba4 | -| min_disk | 0 | -| min_ram | 0 | -| name | cirros | -| owner | 9f7e8fdc50f34b658cfaee9c48e5e12d | -| properties | os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/cirros', owner_specified.openstack.sha256='' | -| protected | False | -| schema | /v2/schemas/image | -| status | queued | -| tags | | -| updated_at | 2023-01-31T21:12:56Z | -| visibility | shared | -+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ - -+--------------------------------------+--------+--------+ -| ID | Name | Status | -+--------------------------------------+--------+--------+ -| 46a3eac1-7224-40bc-9083-f2f0cd122ba4 | cirros2| active | -| c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active | -+--------------------------------------+--------+--------+ - - -(openstack)$ oc rsh ceph -sh-4.4$ ceph -s -r cluster: - id: 432d9a34-9cee-4109-b705-0c59e8973983 - health: HEALTH_OK - - services: - mon: 1 daemons, quorum a (age 4h) - mgr: a(active, since 4h) - osd: 1 osds: 1 up (since 4h), 1 in (since 4h) - - data: - pools: 5 pools, 160 pgs - objects: 46 objects, 224 MiB - usage: 
247 MiB used, 6.8 GiB / 7.0 GiB avail - pgs: 160 active+clean - -sh-4.4$ rbd -p images ls -46a3eac1-7224-40bc-9083-f2f0cd122ba4 -c3158cad-d50b-452f-bec1-f250562f5c1f ----- - include::../modules/proc_adopting-image-service-with-object-storage-backend.adoc[leveloffset=+1] include::../modules/proc_adopting-image-service-with-block-storage-backend.adoc[leveloffset=+1] -include::../modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc[leveloffset=+1] \ No newline at end of file +include::../modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc[leveloffset=+1] + +include::../modules/proc_adopting-image-service-with-ceph-backend.adoc[leveloffset=+1] + +include::../modules/proc_verifying-the-image-service-adoption.adoc[leveloffset=+1] \ No newline at end of file diff --git a/docs_user/modules/proc_adopting-image-service-with-ceph-backend.adoc b/docs_user/modules/proc_adopting-image-service-with-ceph-backend.adoc new file mode 100644 index 000000000..1bcdd14f2 --- /dev/null +++ b/docs_user/modules/proc_adopting-image-service-with-ceph-backend.adoc @@ -0,0 +1,71 @@ +[id="adopting-image-service-with-ceph-backend_{context}"] + += Adopting the {image_service} that is deployed with a {Ceph} backend + +Adopt the {image_service_first_ref} that you deployed with a {Ceph} backend. Use the `customServiceConfig` parameter to inject the right configuration to the `GlanceAPI` instance. + +.Prerequisites + +* Previous Adoption steps completed. Notably, MariaDB, Keystone and Barbican +should be already adopted. +* Make sure the Ceph-related secret (`ceph-conf-files`) was created in +the `openstack` namespace and that the `extraMounts` property of the +`OpenStackControlPlane` custom resource (CR) has been configured properly. These tasks are described in an earlier Adoption step xref:configuring-a-ceph-backend_migrating-databases[Configuring a Ceph backend]. ++ +---- +cat << EOF > glance_patch.yaml +spec: + glance: + enabled: true + template: + databaseInstance: openstack + customServiceConfig: | + [DEFAULT] + enabled_backends=default_backend:rbd + [glance_store] + default_backend=default_backend + [default_backend] + rbd_store_ceph_conf=/etc/ceph/ceph.conf + rbd_store_user=openstack + rbd_store_pool=images + store_description=Ceph glance store backend. + storageClass: "local-storage" + storageRequest: 10G + glanceAPIs: + default: + replicas: 1 + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + networkAttachments: + - storage +EOF +---- + +[NOTE] +==== +If you have previously backed up your {OpenStackShort} services configuration file from the old environment, you can use os-diff to compare and make sure the configuration is correct. +For more information, see xref:reviewing-the-openstack-control-plane-configuration_adopt-control-plane[Reviewing the OpenStack control plane configuration]. + +---- +pushd os-diff +./os-diff cdiff --service glance -c /tmp/collect_tripleo_configs/glance/etc/glance/glance-api.conf -o glance_patch.yaml +---- + +This produces the difference between both ini configuration files. 
+==== + +.Procedure + +* Patch `OpenStackControlPlane` CR to deploy {image_service} with a {Ceph} backend: ++ +---- +$ oc patch openstackcontrolplane openstack --type=merge --patch-file glance_patch.yaml +---- \ No newline at end of file diff --git a/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc b/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc index 081fe75b9..5328e0b77 100644 --- a/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc +++ b/docs_user/modules/proc_adopting-image-service-with-nfs-ganesha-backend.adoc @@ -2,24 +2,20 @@ = Adopting the {image_service} that is deployed with an NFS Ganesha backend -Adopt the {image_service_first_ref} that you deployed with an NFS Ganesha backend. When {image_service} is deployed with NFS Ganesha as a backend in the {rhos_prev_long} environment based on {OpenStackPreviousInstaller}, the control plane `glanceAPI` instance is deployed with the following configuration: +Adopt the {image_service_first_ref} that you deployed with an NFS Ganesha backend. The following steps assume that: + +. The Storage network has been propagated to the {OpenStackShort} control plane. +. The {image_service} is able to reach the Storage network and connect to the nfs-server through the port `2049`. .Prerequisites * Previous Adoption steps completed. Notably, MariaDB, Keystone and Barbican should be already adopted. - -.Procedure - -When the source Cloud based on TripleO uses the {image_service} with a NFS Ganesha backend, before -patching the OpenStackControlPlane to deploy the {image_service} it is important to validate -a few networking related prerequisites. -In the source cloud, verify the NFS parameters used by the overcloud to configure -the {image_service} backend. +* In the source cloud, verify the NFS Ganesha parameters used by the overcloud to configure the {image_service} backend. In particular, find among the TripleO heat templates the following variables that are usually an override of the default content provided by -`/usr/share/openstack-tripleo-heat-templates/environments/storage/glance-nfs.yaml`[glance-nfs.yaml].: - ---- +`/usr/share/openstack-tripleo-heat-templates/environments/storage/glance-nfs.yaml`[glance-nfs.yaml]: ++ +---- **GlanceBackend**: file @@ -27,28 +23,20 @@ In particular, find among the TripleO heat templates the following variables tha **GlanceNfsShare**: 192.168.24.1:/var/nfs ---- - -In the example above, as the first variable shows, unlike Cinder, the {image_service} has no -notion of NFS backend: the `File` driver is used in this scenario, and behind the -scenes, the `filesystem_store_datadir` which usually points to `/var/lib/glance/images/` -is mapped to the export value provided by the `GlanceNfsShare` variable. -If the `GlanceNfsShare` is not exported through a network that is supposed to be -propagated to the adopted {rhos_prev_long} control plane, an extra action is required -by the human administrator, who must stop the `nfs-server` and remap the export -to the `storage` network. This action usually happens when the {image_service} is -stopped in the source controller nodes. 
-In the podified control plane, as per the
-(https://github.com/openstack-k8s-operators/docs/blob/main/images/network_diagram.jpg)[network isolation diagram],
-the {image_service} is attached to the Storage network, propagated via the associated
-`NetworkAttachmentsDefinition` CR, and the resulting Pods have already the right
-permissions to handle the Image Service traffic through this network.
-In a deployed {OpenStackShort} control plane, you can verify that the network mapping
-matches with what has been deployed in the TripleO based environment by checking
-both the `NodeNetworkConfigPolicy` (`nncp`) and the `NetworkAttachmentDefinition`
-(`net-attach-def`) with the following commands:
-
-```
+----
++
+In the example above, as the first variable shows, the {image_service} has no notion of an NFS Ganesha backend: the `File` driver is used in this scenario, and behind the scenes, the `filesystem_store_datadir` option, which usually points to `/var/lib/glance/images/`, is mapped to the export value provided by the `GlanceNfsShare` variable.
+If the `GlanceNfsShare` is not exported through a network that is supposed to be propagated to the adopted {rhos_prev_long} control plane, an extra action is required: the administrator must stop the `nfs-server` and remap the export to the `storage` network. This action usually happens when the {image_service} is stopped on the source Controller nodes.
+ifeval::["{build}" != "downstream"]
+In the control plane, as per the https://github.com/openstack-k8s-operators/docs/blob/main/images/network_diagram.jpg[network isolation diagram],
+the {image_service} is attached to the Storage network, propagated via the associated `NetworkAttachmentDefinition` custom resource, and the resulting pods already have the right permissions to handle the {image_service} traffic through this network.
+endif::[]
+ifeval::["{build}" != "upstream"]
+In the control plane, the {image_service} is attached to the Storage network, propagated via the associated `NetworkAttachmentDefinition` custom resource, and the resulting pods already have the right permissions to handle the {image_service} traffic through this network.
+endif::[]
+In a deployed {OpenStackShort} control plane, you can verify that the network mapping matches what was deployed in the {OpenStackPreviousInstaller}-based environment by checking both the `NodeNetworkConfigurationPolicy` (`nncp`) and the `NetworkAttachmentDefinition` (`net-attach-def`):
++
+----
 $ oc get nncp
 NAME                        STATUS      REASON
 enp6s0-crc-8cf2w-master-0   Available   SuccessfullyConfigured
@@ -66,22 +54,16 @@ ctlplane      true   false   ["192.168.122.80-192.168.122.90"]
 internalapi   true   false   ["172.17.0.80-172.17.0.90"]
 storage       true   false   ["172.18.0.80-172.18.0.90"]
 tenant        true   false   ["172.19.0.80-172.19.0.90"]
-```
-
+----
++
 The above represents an example of the output that should be checked in the
 {OpenShift} environment to make sure there are no issues with the propagated
 networks.
 
-The following steps assume that:
-
-1. the Storage network has been propagated to the {OpenStackShort} control plane
-2. The {image_service} is able to reach the Storage network and connect to the nfs-server
-   through the port `2049`.
-
-If the above conditions are met, it is possible to adopt the {image_service}
-and create a new `default` `GlanceAPI` instance connected with the existing
-NFS share.
 
+.Procedure
+
+. Adopt the {image_service} and create a new `default` `GlanceAPI` instance connected to the existing NFS Ganesha share.
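++
+Optionally, before you apply the patch, you can run a minimal reachability sketch to confirm that the `nfs-server` answers on port `2049` from a host attached to the Storage network. The address `192.168.24.1` is the example export shown earlier; substitute the address of your own `nfs-server`:
++
+----
+$ timeout 5 bash -c '</dev/tcp/192.168.24.1/2049' && echo "nfs-server reachable on port 2049"
+----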
++
 ----
 cat << EOF > glance_nfs_patch.yaml
@@ -132,29 +114,31 @@ spec:
       - storage
 EOF
 ----
-
++
 [NOTE]
 Replace in `glance_nfs_patch.yaml` the `nfs/server` IP address with the IP used
 to reach the `nfs-server` and make sure the `nfs/path` points to the exported
 path in the `nfs-server`.
 
-Patch `OpenStackControlPlane` to deploy {image_service} with a NFS backend:
-
+. Patch the `OpenStackControlPlane` CR to deploy the {image_service} with an NFS Ganesha backend:
++
 ----
-oc patch openstackcontrolplane openstack --type=merge --patch-file glance_nfs_patch.yaml
+$ oc patch openstackcontrolplane openstack --type=merge --patch-file glance_nfs_patch.yaml
 ----
 
-When GlanceAPI is active, you can see a single API instance:
+.Verification
 
-```
+* When GlanceAPI is active, you can see a single API instance:
++
+----
 $ oc get pods -l service=glance
 
 NAME READY STATUS RESTARTS
 glance-default-single-0 3/3 Running 0
-```
-
+----
 and the description of the pod must report:
 
-```
+----
 Mounts:
 ...
 nfs:
@@ -163,11 +147,11 @@ Mounts:
   Path: {{ nfs export path }}
   ReadOnly: false
 ...
-```
-
-It is also possible to double check the mountpoint by running the following:
-
-```
+----
 
+* Check the mountpoint:
++
+----
 oc rsh -c glance-api glance-default-single-0
 
 sh-5.1# mount
@@ -176,14 +160,11 @@ sh-5.1# mount
 {{ ip address }}:/var/nfs on /var/lib/glance/images type nfs4 (rw,relatime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.18.0.5,local_lock=none,addr=172.18.0.5)
 ...
 ...
-```
-
-You can run an `openstack image create` command and double check, on the NFS
-node, the uuid has been created in the exported directory.
-
-For example:
+----
 
-```
+* Confirm that the UUID has been created in the exported directory on the NFS Ganesha node. For example:
++
+----
 $ oc rsh openstackclient
 $ openstack image list
 
 sh-5.1$ openstack image list
 +--------------------------------------+--------+--------+
 | ID | Name | Status |
 +--------------------------------------+--------+--------+
 | 634482ca-4002-4a6d-b1d5-64502ad02630 | cirros | active |
 +--------------------------------------+--------+--------+
@@ -201,11 +182,11 @@ sh-5.1$ openstack image list
 +--------------------------------------+--------+--------+
 | 634482ca-4002-4a6d-b1d5-64502ad02630 | cirros | active |
 +--------------------------------------+--------+--------+
-```
-
-On the nfs-server node, the same `uuid` is in the exported `/var/nfs`:
+----
 
-```
+* On the nfs-server node, verify that the same UUID is in the exported `/var/nfs`:
++
+----
 $ ls /var/nfs/
 634482ca-4002-4a6d-b1d5-64502ad02630
-```
+----
diff --git a/docs_user/modules/proc_verifying-the-image-service-adoption.adoc b/docs_user/modules/proc_verifying-the-image-service-adoption.adoc
new file mode 100644
index 000000000..205d3556f
--- /dev/null
+++ b/docs_user/modules/proc_verifying-the-image-service-adoption.adoc
@@ -0,0 +1,128 @@
+[id="verifying-the-image-service-adoption_{context}"]
+
+= Verifying the {image_service} adoption
+
+Verify that you successfully adopted your {image_service_first_ref} to the {rhos_long} {rhos_curr_ver} deployment.
+//kgilliga: Does this procedure apply to all backends, or do some verification steps only apply to specific backends?
+
+.Procedure
+
+. Test the glance service from the {OpenStackShort} CLI. You can use os-diff to compare and make sure the configuration was correctly applied to the glance pods:
++
+----
+./os-diff cdiff --service glance -c /etc/glance/glance.conf.d/02-config.conf -o glance_patch.yaml --frompod -p glance-api
+----
++
+If no lines appear, the configuration is correct.
+
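+. Optionally, confirm that the `GlanceAPI` pods are running before you inspect their configuration. This quick check is a sketch that reuses the `service=glance` label from the examples in this guide:
++
+----
+$ oc get pods -l service=glance
+----
+
+. 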
Inspect the resulting glance pods and ensure that Ceph secrets are properly mounted:
+//kgilliga: Does this step only apply to customers who are using a Ceph backend?
++
+----
+GLANCE_POD=`oc get pod |grep glance-default-external-0 | cut -f 1 -d' '`
+oc exec -t $GLANCE_POD -c glance-api -- cat /etc/glance/glance.conf.d/02-config.conf
+
+[DEFAULT]
+enabled_backends=default_backend:rbd
+[glance_store]
+default_backend=default_backend
+[default_backend]
+rbd_store_ceph_conf=/etc/ceph/ceph.conf
+rbd_store_user=openstack
+rbd_store_pool=images
+store_description=Ceph glance store backend.
+
+oc exec -t $GLANCE_POD -c glance-api -- ls /etc/ceph
+ceph.client.openstack.keyring
+ceph.conf
+----
+
+. Check that the service is active and the endpoints are properly updated in the {OpenStackShort} CLI:
++
+----
+(openstack)$ service list | grep image
+
+| fc52dbffef36434d906eeb99adfc6186 | glance | image |
+
+(openstack)$ endpoint list | grep image
+
+| 569ed81064f84d4a91e0d2d807e4c1f1 | regionOne | glance | image | True | internal | http://glance-internal-openstack.apps-crc.testing |
+| 5843fae70cba4e73b29d4aff3e8b616c | regionOne | glance | image | True | public | http://glance-public-openstack.apps-crc.testing |
+| 709859219bc24ab9ac548eab74ad4dd5 | regionOne | glance | image | True | admin | http://glance-admin-openstack.apps-crc.testing |
+----
+
+. Check that the images that you previously listed in the source Cloud are available in the adopted service:
++
+----
+(openstack)$ image list
++--------------------------------------+--------+--------+
+| ID | Name | Status |
++--------------------------------------+--------+--------+
+| c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active |
++--------------------------------------+--------+--------+
+----
+
+. 
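Optionally, display one of the adopted images in detail as an extra check. This is a sketch; the image name `cirros` is taken from the listing above:
++
+----
+(openstack)$ openstack image show cirros
+----
+
+. 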
Test that an image can be created on the adopted service:
++
+----
+(openstack)$ alias openstack="oc exec -t openstackclient -- openstack"
+(openstack)$ curl -L -o /tmp/cirros-0.5.2-x86_64-disk.img http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img
+    qemu-img convert -O raw /tmp/cirros-0.5.2-x86_64-disk.img /tmp/cirros-0.5.2-x86_64-disk.img.raw
+    openstack image create --container-format bare --disk-format raw --file /tmp/cirros-0.5.2-x86_64-disk.img.raw cirros2
+    openstack image list
+  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
+                                 Dload  Upload   Total   Spent    Left  Speed
+100   273  100   273    0     0   1525      0 --:--:-- --:--:-- --:--:--  1533
+  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
+100 15.5M  100 15.5M    0     0  17.4M      0 --:--:-- --:--:-- --:--:-- 17.4M
+
++------------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| Field            | Value                                                                                                                                      |
++------------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+| container_format | bare |
+| created_at       | 2023-01-31T21:12:56Z |
+| disk_format      | raw |
+| file             | /v2/images/46a3eac1-7224-40bc-9083-f2f0cd122ba4/file |
+| id               | 46a3eac1-7224-40bc-9083-f2f0cd122ba4 |
+| min_disk         | 0 |
+| min_ram          | 0 |
+| name             | cirros2 |
+| owner            | 9f7e8fdc50f34b658cfaee9c48e5e12d |
+| properties       | os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/cirros2', owner_specified.openstack.sha256='' |
+| protected        | False |
+| schema           | /v2/schemas/image |
+| status           | queued |
+| tags             | |
+| updated_at       | 2023-01-31T21:12:56Z |
+| visibility       | shared |
++------------------+--------------------------------------------------------------------------------------------------------------------------------------------+
+
++--------------------------------------+--------+--------+
+| ID                                   | Name   | Status |
++--------------------------------------+--------+--------+
+| 46a3eac1-7224-40bc-9083-f2f0cd122ba4 | cirros2| active |
+| c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active |
++--------------------------------------+--------+--------+
+
+
+(openstack)$ oc rsh ceph
+sh-4.4$ ceph -s
+  cluster:
+    id:     432d9a34-9cee-4109-b705-0c59e8973983
+    health: HEALTH_OK
+
+  services:
+    mon: 1 daemons, quorum a (age 4h)
+    mgr: a(active, since 4h)
+    osd: 1 osds: 1 up (since 4h), 1 in (since 4h)
+
+  data:
+    pools:   5 pools, 160 pgs
+    objects: 46 objects, 224 MiB
+    usage:   247 MiB used, 6.8 GiB / 7.0 GiB avail
+    pgs:     160 active+clean
+
+sh-4.4$ rbd -p images ls
+46a3eac1-7224-40bc-9083-f2f0cd122ba4
+c3158cad-d50b-452f-bec1-f250562f5c1f
+----

From 2c2c00fbff9cf0c16d6ee3803a147965377c64e6 Mon Sep 17 00:00:00 2001
From: Katie Gilligan
Date: Wed, 1 May 2024 15:33:18 -0400
Subject: [PATCH 3/3] incorporating SME comments

---
 .../proc_verifying-the-image-service-adoption.adoc | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/docs_user/modules/proc_verifying-the-image-service-adoption.adoc b/docs_user/modules/proc_verifying-the-image-service-adoption.adoc
index 205d3556f..5b2db9acd 100644
--- a/docs_user/modules/proc_verifying-the-image-service-adoption.adoc
+++ b/docs_user/modules/proc_verifying-the-image-service-adoption.adoc
@@ -3,7 +3,6 @@
 = Verifying the {image_service} adoption
 
 Verify that you successfully adopted your {image_service_first_ref} to the {rhos_long} {rhos_curr_ver} deployment.
-//kgilliga: Does this procedure apply to all backends, or do some verification steps only apply to specific backends?
 
 .Procedure
 
@@ -15,8 +14,7 @@ Verify that you successfully adopted your {image_service_first_ref} to the {rhos
 +
 If no lines appear, the configuration is correct.
 
-. Inspect the resulting glance pods and ensure that Ceph secrets are properly mounted:
-//kgilliga: Does this step only apply to customers who are using a Ceph backend?
+. Inspect the resulting glance pods:
 +
 ----
 GLANCE_POD=`oc get pod |grep glance-default-external-0 | cut -f 1 -d' '`
 oc exec -t $GLANCE_POD -c glance-api -- cat /etc/glance/glance.conf.d/02-config.conf
 
 [DEFAULT]
@@ -31,7 +29,7 @@ rbd_store_ceph_conf=/etc/ceph/ceph.conf
 rbd_store_user=openstack
 rbd_store_pool=images
 store_description=Ceph glance store backend.
+----
 
+. If you use a Ceph backend, ensure that the Ceph secrets are properly mounted:
++
+----
 oc exec -t $GLANCE_POD -c glance-api -- ls /etc/ceph
 ceph.client.openstack.keyring
 ceph.conf
@@ -61,7 +63,7 @@ ceph.conf
 | c3158cad-d50b-452f-bec1-f250562f5c1f | cirros | active |
 +--------------------------------------+--------+--------+
 ----
-
+ifeval::["{build}" != "downstream"]
 . Test that an image can be created on the adopted service:
 +
 ----
@@ -126,3 +128,4 @@ sh-4.4$ rbd -p images ls
 46a3eac1-7224-40bc-9083-f2f0cd122ba4
 c3158cad-d50b-452f-bec1-f250562f5c1f
 ----
+endif::[]
\ No newline at end of file