diff --git a/tests/roles/ceph_migrate/defaults/main.yml b/tests/roles/ceph_migrate/defaults/main.yml
index 94d16166b..67e07afe2 100644
--- a/tests/roles/ceph_migrate/defaults/main.yml
+++ b/tests/roles/ceph_migrate/defaults/main.yml
@@ -9,7 +9,7 @@ ceph_daemons_layout:
 ceph_container_ns: quay.io/ceph
 ceph_container_image: ceph
 ceph_container_tag: v18
-ceph_container: "{{ ceph_container_ns }}/{{ceph_container_image}}:{{ceph_container_tag}}"
+ceph_container: "{{ ceph_container_ns }}/{{ ceph_container_image }}:{{ ceph_container_tag }}"
 # ceph_config_tmp_client_home is used to get a tmp ceph_cli that can be run
 # from a mon (usually controller-0) to connect to a Ceph cluster after the
 # node is drained and removed
diff --git a/tests/roles/ceph_migrate/meta/main.yml b/tests/roles/ceph_migrate/meta/main.yml
index c572acc9f..e8bdac6a9 100644
--- a/tests/roles/ceph_migrate/meta/main.yml
+++ b/tests/roles/ceph_migrate/meta/main.yml
@@ -1,7 +1,7 @@
 galaxy_info:
-  author: your name
-  description: your role description
-  company: your company (optional)
+  author: OpenStack
+  description: Externalize a Ceph cluster and migrate daemons
+  company: Red Hat

   # If the issue tracker for your role is not on github, uncomment the
   # next line and provide a value
@@ -14,9 +14,9 @@ galaxy_info:
   # - GPL-3.0-only
   # - Apache-2.0
   # - CC-BY-4.0
-  license: license (GPL-2.0-or-later, MIT, etc)
+  license: Apache-2.0

-  min_ansible_version: 2.1
+  min_ansible_version: '2.9'

   # If this a Container Enabled role, provide the minimum Ansible Container version.
   # min_ansible_container_version:
diff --git a/tests/roles/ceph_migrate/tasks/ceph_cli.yaml b/tests/roles/ceph_migrate/tasks/ceph_cli.yaml
index bd5497ce3..77ac4f2fd 100644
--- a/tests/roles/ceph_migrate/tasks/ceph_cli.yaml
+++ b/tests/roles/ceph_migrate/tasks/ceph_cli.yaml
@@ -19,16 +19,15 @@
     ceph_cli: >-
       {{ ceph_container_cli }} run --rm {{ ceph_container_options }}
       --volume {{ ceph_config_home }}:/etc/ceph:z
-      {% if admin_daemon|default(false) %}
+      {% if admin_daemon | default(false) %}
       --volume /var/run/ceph/{{ ceph_fsid }}:/var/run/ceph:z
       {% endif %}
       --entrypoint {{ ceph_command | default('ceph') }}
       {{ ceph_container }}
-      {% if ceph_command|default('ceph') == 'ceph' or ceph_command|default('ceph') == 'rados' or ceph_command|default('ceph') == 'rbd' -%}
-      {% if not admin_daemon|default(false) -%}
-      --fsid {{ ceph_fsid }} -c /etc/ceph/{{ ceph_cluster }}.conf -k /etc/ceph/{{ ceph_cluster }}.client.{{ select_keyring| default('admin') }}.keyring
-      {%- endif %}
-      {% if external_cluster|default(false) -%}
-      -n client.{{ select_keyring }}
-      {%- endif %}
+      {% if ceph_command | default('ceph') == 'ceph' or ceph_command | default('ceph') == 'rados' or ceph_command | default('ceph') == 'rbd' -%}
+      {% if not admin_daemon | default(false) -%}
+      --fsid {{ ceph_fsid }} -c /etc/ceph/{{ ceph_cluster }}.conf -k /etc/ceph/{{ ceph_cluster }}.client.{{ select_keyring | default('admin') }}.keyring {%- endif %}
+      {% if external_cluster | default(false) -%}
+      -n client.{{ select_keyring }}
+      {%- endif %}{%- endif %}
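For orientation, with the defaults above and assuming `ceph_container_cli: podman`, an empty `ceph_container_options`, `ceph_config_home: /etc/ceph`, and the default `ceph_command` of `ceph` (those variables are defined elsewhere in the role, so this is a sketch rather than guaranteed output), the fact assembled here renders to roughly:

# Approximate rendering of the ceph_cli fact; <fsid> stands for mon_dump.fsid.
ceph_cli: >-
  podman run --rm
  --volume /etc/ceph:/etc/ceph:z
  --entrypoint ceph quay.io/ceph/ceph:v18
  --fsid <fsid> -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring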
diff --git a/tests/roles/ceph_migrate/tasks/ceph_client.yaml b/tests/roles/ceph_migrate/tasks/ceph_client.yaml
index 1caee0494..9208661dc 100644
--- a/tests/roles/ceph_migrate/tasks/ceph_client.yaml
+++ b/tests/roles/ceph_migrate/tasks/ceph_client.yaml
@@ -17,11 +17,12 @@
   delegate_to: "{{ cur_mon.split('.')[0] }}.ctlplane"
   block:
     - name: TMP_CLIENT - Patch os-net-config config and setup a tmp client IP
-      lineinfile:
+      ansible.builtin.lineinfile:
         dest: "{{ os_net_conf_path }}"
         insertafter: "{{ ceph_storage_net_prefix }}"
         line: " - ip_netmask: {{ ceph_client_ip }}/24"
-        backup: yes
+        mode: '0644'
+        backup: true

     - name: TMP_CLIENT - Refresh os-net-config
       ansible.builtin.command:
@@ -37,6 +38,7 @@
       ansible.builtin.file:
         path: "{{ ceph_config_tmp_client_home }}"
         state: directory
+        mode: '0755'
     - name: Check file in the src directory
       ansible.builtin.find:
         paths: /etc/ceph
@@ -45,10 +47,11 @@
     - name: Backup ceph client data
      when: dir_ceph_files.files | length > 0
      become: true
-      copy:
+      ansible.builtin.copy:
         remote_src: true
         src: "/etc/ceph/{{ item }}"
         dest: "{{ ceph_config_tmp_client_home }}"
+        mode: '0644'
       loop:
         - ceph.conf
         - ceph.client.admin.keyring
diff --git a/tests/roles/ceph_migrate/tasks/ceph_containers.yaml b/tests/roles/ceph_migrate/tasks/ceph_containers.yaml
index 0dea218f3..c7fd4d4b9 100644
--- a/tests/roles/ceph_migrate/tasks/ceph_containers.yaml
+++ b/tests/roles/ceph_migrate/tasks/ceph_containers.yaml
@@ -15,13 +15,13 @@
 # under the License.

 - name: Get ceph_cli
-  include_tasks: ceph_cli.yaml
+  ansible.builtin.include_tasks: ceph_cli.yaml
   vars:
     ceph_fsid: "{{ mon_dump.fsid }}"
     ceph_cluster: ceph

 - name: set container image base in ceph configuration
-  command: |
+  ansible.builtin.command: |
     {{ ceph_cli }} config set mgr mgr/cephadm/container_image_base {{ ceph_container }}
   changed_when: false
   become: true
@@ -31,25 +31,25 @@
   become: true
   block:
     - name: set alertmanager container image in ceph configuration
-      command: |
+      ansible.builtin.command: |
         {{ ceph_cli }} config set mgr mgr/cephadm/container_image_alertmanager \
         {{ ceph_alertmanager_container_image }}
       changed_when: false

     - name: set grafana container image in ceph configuration
-      command: |
+      ansible.builtin.command: |
         {{ ceph_cli }} config set mgr mgr/cephadm/container_image_grafana \
         {{ ceph_grafana_container_image }}
       changed_when: false

     - name: set node-exporter container image in ceph configuration
-      command: |
+      ansible.builtin.command: |
         {{ ceph_cli }} config set mgr mgr/cephadm/container_image_node_exporter \
         {{ ceph_node_exporter_container_image }}
       changed_when: false

     - name: set prometheus container image in ceph configuration
-      command: |
+      ansible.builtin.command: |
         {{ ceph_cli }} config set mgr mgr/cephadm/container_image_prometheus \
         {{ ceph_prometheus_container_image }}
       changed_when: false
diff --git a/tests/roles/ceph_migrate/tasks/ceph_load.yaml b/tests/roles/ceph_migrate/tasks/ceph_load.yaml
index 76099d571..55d08966b 100644
--- a/tests/roles/ceph_migrate/tasks/ceph_load.yaml
+++ b/tests/roles/ceph_migrate/tasks/ceph_load.yaml
@@ -15,6 +15,7 @@
     remote_src: true
     src: "{{ ceph_config_tmp_client_home }}/{{ item }}"
     dest: "/etc/ceph"
+    mode: '0644'
   loop:
     - ceph.conf
     - ceph.client.admin.keyring
@@ -75,15 +76,16 @@
   become: true
   ansible.builtin.command: |
     cephadm shell -- ceph orch host ls -f json
-  register: hosts
-- ansible.builtin.set_fact:
-    hosts: "{{ hosts.stdout | from_json }}"
+  register: nds
+- name: Load nodes
+  ansible.builtin.set_fact:
+    nds: "{{ nds.stdout | from_json }}"
 - name: Load hostmap List
   ansible.builtin.set_fact:
     hostmap: |-
       {% set hm = {} %}
-      {% for h in hosts %}
-      {% set _ = hm.__setitem__(h.hostname,h.labels) %}
+      {% for h in nds %}
+      {% set _ = hm.__setitem__(h.hostname, h.labels) %}
       {% endfor %}
       {{ hm }}
 - name: Print Host Map
@@ -100,7 +102,8 @@
   ansible.builtin.command:
     cephadm shell -- ceph mon dump -f json
   register: mons
-- ansible.builtin.set_fact:
+- name: Get Monmap
+  ansible.builtin.set_fact:
     mon_dump: "{{ mons.stdout | from_json }}"
 - name: Print monmap
   when: debug | default(false)
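ceph_validate.yaml below consumes a `ceph` fact shaped like the output of `ceph -s -f json`. A trimmed illustration of the keys the checks actually read (the values here are made up):

# Illustrative input for ceph_validate.yaml; only referenced fields shown.
ceph:
  health:
    status: HEALTH_OK
  pgmap:
    pgs_by_state:
      - state_name: active+clean
  osdmap:
    num_osds: 3
    num_up_osds: 3
  monmap:
    num_mons: 3
    min_mon_release_name: quincy
  mgrmap:
    available: true
  progress_events: {}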
diff --git a/tests/roles/ceph_migrate/tasks/ceph_validate.yaml b/tests/roles/ceph_migrate/tasks/ceph_validate.yaml
index 55a4304ca..cbd6f5785 100644
--- a/tests/roles/ceph_migrate/tasks/ceph_validate.yaml
+++ b/tests/roles/ceph_migrate/tasks/ceph_validate.yaml
@@ -1,86 +1,101 @@
 - name: Fail if input is not provided
-  fail:
+  ansible.builtin.fail:
     msg: "Input not ready"
   when: ceph is undefined or ceph | length == 0

 - name: Validate SRC Ceph Cluster HEALTH
+  tags:
+    - ceph_health
   block:
-  - ansible.builtin.debug:
+    - name: Get cluster health
+      ansible.builtin.debug:
        msg: "{{ ceph.health.status }}"
       when: debug | default(false)

     - name: Fail if health is HEALTH_WARN || HEALTH_ERR
-      fail:
+      ansible.builtin.fail:
         msg: Ceph is in {{ ceph.health.status }} state.
       when:
         - ceph.health.status == 'HEALTH_WARN' or
           ceph.health.status == 'HEALTH_ERR'

 - name: Validate SRC Ceph Cluster PGMAP
+  when: ceph.pgmap is defined
+  vars:
+    pgstate: "{{ ceph | community.general.json_query('pgmap.pgs_by_state[0].state_name') }}"
   block:
-  - ansible.builtin.debug:
-      msg: "{{ ceph.pgmap.pgs_by_state[0].state_name }}"
+    - name: PgMap
       when: debug | default(false)
+      ansible.builtin.debug:
+        msg: "PGs State is: {{ pgstate }}"

-  - name: Fail if PGs are not in active+clean state
-    fail:
-      msg: "{{ PGs State is: ceph.pgmap.pgs_by_state[0].state_name }}"
-    when: ceph.pgmap.pgs_by_state[0].state_name != 'active+clean'
+    - name: Fail if PGs are not in active+clean state
+      when: pgstate != 'active+clean'
+      ansible.builtin.fail:
+        msg: "PGs State is: {{ pgstate }}"

 - name: Validate SRC Ceph Cluster OSDMAP
+  when: ceph.osdmap is defined
+  vars:
+    num_up_osds: "{{ ceph | community.general.json_query('osdmap.num_up_osds') | default(0) }}"
+    num_osds: "{{ ceph | community.general.json_query('osdmap.num_osds') | default(1) }}"
+    pct: "{{ (num_up_osds | float | default(0.0) / num_osds | float | default(1.0)) * 100 }}"
   block:
-  - ansible.builtin.debug:
-      msg: "{{ ceph.osdmap.num_up_osds | default(0) / ceph.osdmap.num_osds | default(0) }}"
-    when: debug | default(false)
+    - name: OSDMap
+      ansible.builtin.debug:
+        msg: "{{ pct }}"
+      when: debug | default(true)

-  - name: Fail if there is an unacceptable OSDs number
-    fail:
-      msg: "{{ ceph.osdmap.num_up_osds|default(0) }} / {{ ceph.osdmap.num_osds|default(0) }} OSDs available"
-    when: osds | float < 1.0
-    vars:
-      osds: "{{ ceph.osdmap.num_osds / ceph.osdmap.num_up_osds }}"
+    - name: Fail if there is an unacceptable OSDs number
+      ansible.builtin.fail:
+        msg: "{{ pct }} % OSDs available"
+      when: pct | float < 100

 - name: Validate the Ceph Cluster release
   block:
-  - ansible.builtin.debug:
+    - name: MonMap
+      ansible.builtin.debug:
        msg: "{{ ceph.monmap.min_mon_release_name }}"
       when: debug | default(false)

-  - name: Fail if Ceph <= Quincy
-    fail:
-      msg: "{{ Ceph version is <= Reef }}"
+    - name: Fail if Ceph release is not Quincy
+      ansible.builtin.fail:
+        msg: "Ceph release is {{ ceph.monmap.min_mon_release_name }}, not quincy"
       when: ceph.monmap.min_mon_release_name != "quincy"

 - name: Mons are in quorum
   block:
-  - ansible.builtin.debug:
+    - name: Mons in quorum
+      ansible.builtin.debug:
        msg: "{{ ceph.monmap.num_mons }}"
       when: debug | default(false)

-  - name: Fail if Mons are not in quorum
-    fail:
+    - name: Fail if Mons are not in quorum
+      ansible.builtin.fail:
         msg: "Mons: {{ ceph.monmap.num_mons }}"
       when: ceph.monmap.num_mons != decomm_nodes | length
 - name: Mgr is active
   block:
-  - ansible.builtin.debug:
+    - name: Is Ceph Mgr available
+      ansible.builtin.debug:
        msg: "{{ ceph.mgrmap.available }}"
       when: debug | default(false)

-  - name: Fail if Mgr is not available
-    fail:
+    - name: Fail if Mgr is not available
+      ansible.builtin.fail:
         msg: "{{ ceph.mgrmap.available }}"
       when: not ceph.mgrmap.available | bool | default(false)

 - name: No event in progress
   block:
-  - ansible.builtin.debug:
+    - name: In progress events
+      ansible.builtin.debug:
        msg: "{{ ceph.progress_events | length }}"
       when: debug | default(false)

-  - name: Fail if there are in progress events
-    fail:
+    - name: Fail if there are in progress events
+      ansible.builtin.fail:
         msg: "{{ ceph.progress_events }}"
       when: ceph.progress_events | length > 0
diff --git a/tests/roles/ceph_migrate/tasks/drain.yaml b/tests/roles/ceph_migrate/tasks/drain.yaml
index d83bf180a..111671f5e 100644
--- a/tests/roles/ceph_migrate/tasks/drain.yaml
+++ b/tests/roles/ceph_migrate/tasks/drain.yaml
@@ -1,6 +1,6 @@
 # Refresh the client to point to a diff config-home
 - name: Get ceph_cli
-  include_tasks: ceph_cli.yaml
+  ansible.builtin.include_tasks: ceph_cli.yaml
   vars:
     ceph_config_home: "{{ ceph_config_tmp_client_home }}"
     ceph_fsid: "{{ mon_dump.fsid }}"
@@ -12,7 +12,7 @@
   vars:
     nodes:
       - "{{ host }}"
-    action: "rm"
+    act: "rm"
     labels:
       - "mon"
       - "mgr"
@@ -23,11 +23,11 @@
   become: true
   ansible.builtin.command:
     "{{ ceph_cli }} orch daemon rm mon.{{ host.split('.')[0] }} --force"
-  ignore_errors: true
+  # ignore_errors: true

 # Sleep before moving to the next mon
 - name: Pause
-  pause:
+  ansible.builtin.pause:
     seconds: "{{ ceph_timeout }}"

 - name: DRAIN - Drain the host
diff --git a/tests/roles/ceph_migrate/tasks/firewall.yaml b/tests/roles/ceph_migrate/tasks/firewall.yaml
index 4817b2bfb..cc74c1b65 100644
--- a/tests/roles/ceph_migrate/tasks/firewall.yaml
+++ b/tests/roles/ceph_migrate/tasks/firewall.yaml
@@ -7,7 +7,7 @@
   become: true
   block:
     - name: Ensure firewall is enabled/started
-      systemd:
+      ansible.builtin.systemd:
         name: iptables
         state: started
         enabled: true
@@ -42,13 +42,13 @@
   become: true
   block:
     - name: Save firewall rules ipv4
-      command: /usr/libexec/iptables/iptables.init save
+      ansible.builtin.command: /usr/libexec/iptables/iptables.init save

     - name: Save firewall rules ipv6
-      command: /usr/libexec/iptables/ip6tables.init save
+      ansible.builtin.command: /usr/libexec/iptables/ip6tables.init save

     - name: Reload iptables service
-      systemd:
+      ansible.builtin.systemd:
         daemon_reload: true
         enabled: true
         name: "{{ item }}"
diff --git a/tests/roles/ceph_migrate/tasks/labels.yaml b/tests/roles/ceph_migrate/tasks/labels.yaml
index 808ac6a15..d1d0db2f7 100644
--- a/tests/roles/ceph_migrate/tasks/labels.yaml
+++ b/tests/roles/ceph_migrate/tasks/labels.yaml
@@ -17,7 +17,7 @@
 - name: Print INPUT
   when: debug | default(true)
   block:
-    - name: Set/Unset labels - {{ action }}
+    - name: Set/Unset labels - {{ act }}
       ansible.builtin.debug:
         msg: "{{ labels }}"
@@ -25,8 +25,8 @@
       ansible.builtin.debug:
         msg: "{{ nodes }}"

-- name: Set/Unset labels - {{ action }}
+- name: Set/Unset labels - {{ act }}
   become: true
   ansible.builtin.command:
-    "{{ ceph_cli }} orch host label {{ action }} {{ item.0 }} {{ item.1 }}"
-  loop: "{{ nodes |default([]) | product(labels|default([])) | list }}"
+    "{{ ceph_cli }} orch host label {{ act }} {{ item.0 }} {{ item.1 }}"
+  loop: "{{ nodes | default([]) | product(labels | default([])) | list }}"
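The `action` -> `act` rename applied throughout is presumably needed because `action` collides with Ansible's reserved task keyword of the same name; every caller of labels.yaml now passes `act`, along these lines (a sketch mirroring the mgr.yaml caller further below):

- name: Apply a label to the surviving hosts
  ansible.builtin.include_tasks: labels.yaml
  vars:
    nodes: "{{ hostmap.keys() | difference(decomm_nodes) }}"
    act: "add"
    labels:
      - "mgr"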
diff --git a/tests/roles/ceph_migrate/tasks/mds.yaml b/tests/roles/ceph_migrate/tasks/mds.yaml
index 6be0567db..4dd402342 100644
--- a/tests/roles/ceph_migrate/tasks/mds.yaml
+++ b/tests/roles/ceph_migrate/tasks/mds.yaml
@@ -1,6 +1,6 @@
 # Get a fresh ceph_cli
 - name: Get ceph_cli
-  include_tasks: ceph_cli.yaml
+  ansible.builtin.include_tasks: ceph_cli.yaml
   vars:
     ceph_fsid: "{{ mon_dump.fsid }}"
     ceph_cluster: ceph
@@ -22,8 +22,8 @@
      {% set mds = {} %}
      {% for item in servicemap %}
      {% if item.service_type == 'mds' %}
-      {% set _ = mds.__setitem__('service_type',item.service_type) %}
-      {% set _ = mds.__setitem__('service_name',item.service_name) %}
+      {% set _ = mds.__setitem__('service_type', item.service_type) %}
+      {% set _ = mds.__setitem__('service_name', item.service_name) %}
       {% set _ = mds.__setitem__('spec', {}) %}
      {% endif %}
      {% endfor %}
@@ -36,10 +36,10 @@

 # - Expand labels to the rest of the hosts
 - name: Expand MDS labels to the overcloud nodes
-  import_tasks: labels.yaml
+  ansible.builtin.import_tasks: labels.yaml
   vars:
     nodes: "{{ hostmap.keys() }}"
-    action: "add"
+    act: "add"
     labels:
       - "mds"
@@ -111,15 +111,16 @@
 # Remove labels from the controller nodes: it will force a failover and migrate
 # the existing active MDS daemon to the target node
 - name: Remove MDS label from the overcloud nodes
-  import_tasks: labels.yaml
+  ansible.builtin.import_tasks: labels.yaml
   vars:
     nodes: "{{ decomm_nodes }}"
-    action: "rm"
+    act: "rm"
     labels:
       - "mds"

 # Wait for the redeploy to finish before moving to the next stage
-- ansible.builtin.include_tasks: wait_daemons.yaml
+- name: Wait daemons
+  ansible.builtin.include_tasks: wait_daemons.yaml
   vars:
     daemon: mds
     daemon_id:
diff --git a/tests/roles/ceph_migrate/tasks/mgr.yaml b/tests/roles/ceph_migrate/tasks/mgr.yaml
index 5543284a7..019c38042 100644
--- a/tests/roles/ceph_migrate/tasks/mgr.yaml
+++ b/tests/roles/ceph_migrate/tasks/mgr.yaml
@@ -1,6 +1,6 @@
 # Get a fresh ceph_cli
 - name: Get ceph_cli
-  include_tasks: ceph_cli.yaml
+  ansible.builtin.include_tasks: ceph_cli.yaml
   vars:
     ceph_fsid: "{{ mon_dump.fsid }}"
     ceph_cluster: ceph
@@ -19,7 +19,7 @@
   ansible.builtin.include_tasks: labels.yaml
   vars:
     nodes: "{{ hostmap.keys() | difference(decomm_nodes) }}"
-    action: "add"
+    act: "add"
     labels:
       - "mgr"
@@ -31,8 +31,8 @@
      {% set mgr = {} %}
      {% for item in servicemap %}
      {% if item.service_type == 'mgr' %}
-      {% set _ = mgr.__setitem__('service_type',item.service_type) %}
-      {% set _ = mgr.__setitem__('service_name',item.service_name) %}
+      {% set _ = mgr.__setitem__('service_type', item.service_type) %}
+      {% set _ = mgr.__setitem__('service_name', item.service_name) %}
       {% set _ = mgr.__setitem__('spec', {}) %}
      {% endif %}
      {% endfor %}
@@ -61,7 +61,8 @@
   when: debug | default(false)

 # Wait for the redeploy to finish before moving to the next stage
-- ansible.builtin.include_tasks: wait_daemons.yaml
+- name: MGR - wait daemons
+  ansible.builtin.include_tasks: wait_daemons.yaml
   vars:
     daemon: mgr
     daemon_id:
diff --git a/tests/roles/ceph_migrate/tasks/mon.yaml b/tests/roles/ceph_migrate/tasks/mon.yaml
index 60d88cb6a..b713eb7b3 100644
--- a/tests/roles/ceph_migrate/tasks/mon.yaml
+++ b/tests/roles/ceph_migrate/tasks/mon.yaml
@@ -1,6 +1,6 @@
 # Get a client using -v /home/tripleo-admin/ceph_config:/etc/ceph:z as input
 - name: Get ceph_cli
-  include_tasks: ceph_cli.yaml
+  ansible.builtin.include_tasks: ceph_cli.yaml
   vars:
     ceph_config_home: "{{ ceph_config_tmp_client_home }}"
     ceph_fsid: "{{ mon_dump.fsid }}"
@@ -21,7 +21,7 @@
   vars:
     nodes:
       - "{{ target_node }}"
-    action: "add"
+    act: "add"
     labels:
       - "mon"
       - "mgr"
@@ -48,6 +48,7 @@
   ansible.builtin.file:
     path: "{{ ceph_config_tmp_client_home }}"
     state: directory
+    mode: '0755'
 - name: Check file in the src directory
   ansible.builtin.find:
     paths: /etc/ceph
@@ -60,6 +61,7 @@
     remote_src: true
     src: "/etc/ceph/{{ item }}"
     dest: "{{ ceph_config_tmp_client_home }}"
+    mode: '0644'
   loop:
     - ceph.conf
     - ceph.client.admin.keyring
@@ -93,7 +95,7 @@

 # Drain and rm the cur_mon from the Ceph cluster
 - name: MON - Drain and rm --force the cur_mon host
-  #when: cur_mon in decomm_nodes
+  # when: cur_mon in decomm_nodes
   ansible.builtin.include_tasks: drain.yaml
   vars:
     host: "{{ cur_mon }}"
@@ -103,17 +105,18 @@
 # The node should be empty at this point, let's remove it from the Ceph
 # cluster
 - name: MON - rm the cur_mon host from the Ceph cluster
-  #when: cur_mon in decomm_nodes
+  # when: cur_mon in decomm_nodes
   become: true
   ansible.builtin.command:
     "{{ ceph_cli }} orch host rm {{ cur_mon }} --force"
   # let's ignore this for now (for idempotency purposes)
-  ignore_errors: true
+  # TODO: have a good condition here instead of ignore_errors
+  # ignore_errors: true
   tags:
     - ceph_drain

 - name: MON - Get current mon IP address
-  #when: cur_mon in decomm_nodes
+  # when: cur_mon in decomm_nodes
   ansible.builtin.set_fact:
     mon_ipaddr: "{{ mon_ip | split(':') | first | ansible.utils.ipaddr }}"
   vars:
@@ -124,12 +127,12 @@
      {% endif %}
      {% endfor %}

-- name: MON - Move the mon IP Address from {{ cur_mon }} to {{ target_node }}
+- name: MON - Move the mon IP Address to {{ target_node }}
   become: true
   # if no mon addr, this variable is False and the whole block is skipped
   # because there's no network related action that should be performed
   when:
-    #- cur_mon in decomm_nodes
+    # - cur_mon in decomm_nodes
     - mon_ipaddr | default('')
   block:
     - name: MON - Get current mon IP address
@@ -139,11 +142,11 @@

     - name: MON - Patch os-net-config config and REMOVE the current mon IP address
       delegate_to: "{{ cur_mon.split('.')[0] }}.ctlplane"
-      lineinfile:
+      ansible.builtin.lineinfile:
         path: "{{ os_net_conf_path }}"
         state: absent
         regexp: '{{ mon_ipaddr }}/24'
-        backup: yes
+        backup: true

     - name: MON - Refresh os-net-config
       delegate_to: "{{ cur_mon.split('.')[0] }}.ctlplane"
@@ -152,11 +155,11 @@

     - name: MON - Patch os-net-config config and add the mon IP
       delegate_to: "{{ target_node.split('.')[0] }}.ctlplane"
-      lineinfile:
+      ansible.builtin.lineinfile:
         dest: "{{ os_net_conf_path }}"
         insertafter: "{{ ceph_storage_net_prefix }}"
         line: " - ip_netmask: {{ mon_ipaddr }}/24"
-        backup: yes
+        backup: true

     - name: MON - Refresh os-net-config
       delegate_to: "{{ target_node.split('.')[0] }}.ctlplane"
@@ -171,7 +174,7 @@

     - name: MON - Fail if the IP address is not active in the target node
       when: ping_target_ip.rc != 0
-      fail:
+      ansible.builtin.fail:
         msg: "Can't reach the mon IP on the target node"

 - name: Redeploy MON
@@ -228,13 +231,14 @@
       become: true
       ansible.builtin.command:
         "{{ ceph_cli }} orch daemon rm mon.{{ target_node.split('.')[0] }} --force"
-      ignore_errors: true
+      # TODO: replace ignore_errors with a condition
+      # ignore_errors: true

     - name: Wait for the spec to be updated
-      pause:
+      ansible.builtin.pause:
         seconds: "{{ ceph_wait_mon_timeout }}"

-    - name: MON - Redeploy mon on {{ target_node }} - Print command
+    - name: MON - Redeploy mon on {{ target_node }}
       when: debug | default(true)
       ansible.builtin.debug:
         msg: "{{ ceph_cli }} orch daemon add mon {{ target_node.split('.')[0] }}:{{ mon_ipaddr }}"
@@ -247,7 +251,7 @@
         "{{ ceph_cli }} orch daemon add mon {{ target_node.split('.')[0] }}:{{ mon_ipaddr }}"

     - name: Wait for the spec to be updated
-      pause:
+      ansible.builtin.pause:
         seconds: "{{ ceph_wait_mon_timeout }}"

 - name: MON - check mons quorum
@@ -290,7 +294,8 @@
       CEPH_CONF: "{{ ceph_config_tmp_client_home }}"

 # Wait for the redeploy to finish before moving to the next stage
-- ansible.builtin.include_tasks: wait_daemons.yaml
+- name: MON - wait daemons
+  ansible.builtin.include_tasks: wait_daemons.yaml
   vars:
     daemon: mon
     daemon_id: "{{ target_node.split('.')[0] }}"
diff --git a/tests/roles/ceph_migrate/tasks/monitoring.yaml b/tests/roles/ceph_migrate/tasks/monitoring.yaml
index d222b6549..a6fcbebfa 100644
--- a/tests/roles/ceph_migrate/tasks/monitoring.yaml
+++ b/tests/roles/ceph_migrate/tasks/monitoring.yaml
@@ -1,6 +1,6 @@
 # Get a fresh ceph_cli
 - name: Get ceph_cli
-  include_tasks: ceph_cli.yaml
+  ansible.builtin.include_tasks: ceph_cli.yaml
   vars:
     ceph_fsid: "{{ mon_dump.fsid }}"
     ceph_cluster: ceph
@@ -10,7 +10,7 @@
   ansible.builtin.import_tasks: labels.yaml
   vars:
     nodes: "{{ hostmap.keys() | difference(decomm_nodes) }}"
-    action: "add"
+    act: "add"
     labels:
       - "monitoring"
@@ -40,8 +40,8 @@
      {% for item in servicemap %}
      {% if (item.service_type == 'grafana') or (item.service_type == 'prometheus') or (item.service_type == 'alertmanager') %}
      {% set cur = {} %}
-      {% set _ = cur.__setitem__('service_type',item.service_type) %}
-      {% set _ = cur.__setitem__('service_name',item.service_name) %}
+      {% set _ = cur.__setitem__('service_type', item.service_type) %}
+      {% set _ = cur.__setitem__('service_name', item.service_name) %}
       {% set _ = cur.__setitem__('networks', item.networks) %}
       {% set _ = cur.__setitem__('spec', item.spec) %}
       {% set _ = monitoring_stack.__setitem__(item.service_type, cur) %}
@@ -81,9 +81,9 @@
     - "{{ prometheus }}"
     - "{{ alertmanager }}"
   vars:
-    - grafana: "{% set grf = monitoring_stack.get('grafana') %}{{ grf }}"
-    - prometheus: "{% set prom = monitoring_stack.get('prometheus') %}{{ prom }}"
-    - alertmanager: "{% set alm = monitoring_stack.get('alertmanager') %}{{ alm }}"
+    grafana: "{% set grf = monitoring_stack.get('grafana') %}{{ grf }}"
+    prometheus: "{% set prom = monitoring_stack.get('prometheus') %}{{ prom }}"
+    alertmanager: "{% set alm = monitoring_stack.get('alertmanager') %}{{ alm }}"

 - name: Print the resulting spec
   ansible.builtin.debug:
@@ -91,7 +91,8 @@
   when: debug | default(true)

 # Wait for the redeploy to finish before moving to the next stage
-- ansible.builtin.include_tasks: wait_daemons.yaml
+- name: MONITORING - wait daemons
+  ansible.builtin.include_tasks: wait_daemons.yaml
   vars:
     daemon: "{{ item }}"
     daemon_id:
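Each monitoring entry rebuilt by the Jinja block above and re-applied in the loop has this shape (the keys follow the template; the network and spec values here are illustrative only):

# Illustrative monitoring spec entry as rebuilt above (values are examples).
service_type: grafana
service_name: grafana
networks:
  - 172.17.3.0/24
spec:
  port: 3100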
diff --git a/tests/roles/ceph_migrate/tasks/rbd.yaml b/tests/roles/ceph_migrate/tasks/rbd.yaml
index c751268ca..f4d181a52 100644
--- a/tests/roles/ceph_migrate/tasks/rbd.yaml
+++ b/tests/roles/ceph_migrate/tasks/rbd.yaml
@@ -4,16 +4,15 @@
 # complete the migration (either because of cardinality of just because we
 # don't have enough nodes).
 - name: Setup a Ceph client to the first node
-  block:
-  - ansible.builtin.include_tasks: ceph_client.yaml
-    vars:
-      # Use the inventory as source of truth to make sure we **always**
-      # select mon[0] as client. If we stick on decomm_nodes nodes we
-      # might end up selecting a different node on multiple runs.
-      cur_mon: "{{ groups['mon'][0] | default(decomm_nodes | list | sort | first) }}"
-  tags:
-    - ceph_client
-    - ceph_rbd
+  ansible.builtin.include_tasks: ceph_client.yaml
+  vars:
+    # Use the inventory as source of truth to make sure we **always**
+    # select mon[0] as client. If we stick to decomm_nodes we
+    # might end up selecting a different node on multiple runs.
+    cur_mon: "{{ groups['mon'][0] | default(decomm_nodes | list | sort | first) }}"
+  tags:
+    - ceph_client
+    - ceph_rbd

 # Extend mgr deployment to help with failover
 - name: MGR - Migrate RBD node
@@ -46,8 +45,8 @@
      {% set mon = {} %}
      {% for item in servicemap %}
      {% if item.service_type == 'mon' %}
-      {% set _ = mon.__setitem__('service_type',item.service_type) %}
-      {% set _ = mon.__setitem__('service_name',item.service_name) %}
+      {% set _ = mon.__setitem__('service_type', item.service_type) %}
+      {% set _ = mon.__setitem__('service_name', item.service_name) %}
       {% set _ = mon.__setitem__('spec', {}) %}
      {% endif %}
      {% endfor %}
@@ -75,6 +74,8 @@
 # we need to serially migrate mons, so we loop over the nodes and run the
 # procedure provided by mon.yaml
 - name: MON - Migrate RBD node
+  tags:
+    - ceph_rbd
   block:
     - name: MON - Migrate RBD node
       ansible.builtin.include_tasks: mon.yaml
@@ -82,8 +83,8 @@
         cur_mon: "{{ node.0 }}"
         target_node: "{{ node.1 }}"
       # This condition might be a different one
-      loop: "{{ decomm_nodes| zip(target_nodes) }}"
-      #loop: "{{ decomm_nodes|zip(hostmap.keys() | difference(decomm_nodes) | sort) | list }}"
+      loop: "{{ decomm_nodes | zip(target_nodes) }}"
+      # loop: "{{ decomm_nodes|zip(hostmap.keys() | difference(decomm_nodes) | sort) | list }}"
       loop_control:
         loop_var: node
@@ -91,8 +92,5 @@
 # cephadm to converge to a state that sees the new data. We can replace
 # this check later in time with a dynamic wait() based on cephadm
     - name: Sleep before moving to the next mon
-      pause:
+      ansible.builtin.pause:
         seconds: "{{ ceph_timeout }}"
-  tags:
-    - ceph_rbd
-
diff --git a/tests/roles/ceph_migrate/tasks/rgw.yaml b/tests/roles/ceph_migrate/tasks/rgw.yaml
index d2dc79e83..c859b432e 100644
--- a/tests/roles/ceph_migrate/tasks/rgw.yaml
+++ b/tests/roles/ceph_migrate/tasks/rgw.yaml
@@ -1,13 +1,13 @@
 # Get a fresh ceph_cli
 - name: Get ceph_cli
-  include_tasks: ceph_cli.yaml
+  ansible.builtin.include_tasks: ceph_cli.yaml
   vars:
     ceph_fsid: "{{ mon_dump.fsid }}"
     ceph_cluster: ceph

 # Check if all the required data are present
 - name: Fail if RGW VIPs are not defined
-  fail:
+  ansible.builtin.fail:
     msg: "No RGW VIPs have been selected"
   when:
     - ceph_rgw_virtual_ips_list | length == 0
@@ -28,10 +28,10 @@

 # - Expand labels to the target nodes
 - name: Apply RGW label to the target nodes
-  import_tasks: labels.yaml
+  ansible.builtin.import_tasks: labels.yaml
   vars:
     nodes: "{{ hostmap.keys() | difference(decomm_nodes) }}"
-    action: "add"
+    act: "add"
     labels:
       - "rgw"
@@ -43,10 +43,10 @@
      {% set rgw = {} %}
      {% for item in servicemap %}
      {% if item.service_type == 'rgw' %}
-      {% set _ = rgw.__setitem__('service_type',item.service_type) %}
-      {% set _ = rgw.__setitem__('service_name',item.service_name) %}
+      {% set _ = rgw.__setitem__('service_type', item.service_type) %}
+      {% set _ = rgw.__setitem__('service_name', item.service_name) %}
       {% set _ = rgw.__setitem__('networks', item.networks) %}
-      {% set _ = item.spec.__setitem__('rgw_frontend_port', 8090) %} 
+      {% set _ = item.spec.__setitem__('rgw_frontend_port', 8090) %}
       {% set _ = rgw.__setitem__('spec', item.spec) %}
      {% endif %}
      {% endfor %}
@@ -57,14 +57,14 @@
   ansible.builtin.debug:
     msg: "{{ rgw_spec }}"

-#- name: Apply ceph rgw keystone config
-#  #become: true
-#  ansible.builtin.command: |
-#    echo "{{ ceph_cli }} config set global rgw_keystone_url {{ ceph_keystone_ep }}"
-#  changed_when: false
-#  when:
-#    - ceph_keystone_ep is defined
-#    - ceph_keystone_ep | length > 0
+# - name: Apply ceph rgw keystone config
+#   #become: true
+#   ansible.builtin.command: |
+#     echo "{{ ceph_cli }} config set global rgw_keystone_url {{ ceph_keystone_ep }}"
+#   changed_when: false
+#   when:
+#     - ceph_keystone_ep is defined
+#     - ceph_keystone_ep | length > 0

 # Update and apply the spec: it will update the Monitoring Stack deployment,
 # and place daemons on the target nodes
@@ -122,11 +122,12 @@
     - ceph_rgw_ingress

 - name: Wait for cephadm to redeploy
-  pause:
+  ansible.builtin.pause:
     seconds: "{{ ceph_timeout }}"

 # Wait for the redeploy to finish before moving to the next stage
-- ansible.builtin.include_tasks: wait_daemons.yaml
+- name: RGW - wait daemons
+  ansible.builtin.include_tasks: wait_daemons.yaml
   vars:
     daemon: rgw
     daemon_id:
diff --git a/tests/roles/ceph_migrate/tasks/wait_daemons.yaml b/tests/roles/ceph_migrate/tasks/wait_daemons.yaml
index 3e7b1a867..192a09fcf 100644
--- a/tests/roles/ceph_migrate/tasks/wait_daemons.yaml
+++ b/tests/roles/ceph_migrate/tasks/wait_daemons.yaml
@@ -1,12 +1,12 @@
-- name: print command
-  when: debug | default(true)
+- name: Print daemon id option
+  when: debug | default(false)
   ansible.builtin.debug:
-    msg: "Checking daemon: {{ d_id }}"
+    msg: "daemon id: {{ d_id }}"
   vars:
     d_id: |-
      {% set did = "" %}
      {% if daemon_id != None %}
-       --daemon_id {{ daemon_id }}
+      --daemon_id {{ daemon_id }}
      {% endif %}
      {{ did }}
@@ -23,6 +23,6 @@
     d_id: |-
      {% set did = "" %}
      {% if daemon_id != None %}
-       --daemon_id {{ daemon_id }}
+      --daemon_id {{ daemon_id }}
      {% endif %}
      {{ did }}
diff --git a/tests/roles/ceph_migrate/tests/test.yml b/tests/roles/ceph_migrate/tests/test.yml
deleted file mode 100644
index 2703acd07..000000000
--- a/tests/roles/ceph_migrate/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
-  remote_user: root
-  roles:
-    - ceph_migrate
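tests/test.yml is removed above without a successor; if a smoke test is still wanted, a minimal follow-up playbook could look like the sketch below (host names and variable values are illustrative assumptions, not part of this change):

---
# Sketch of a possible successor to the deleted tests/test.yml.
- hosts: localhost
  gather_facts: false
  vars:
    debug: true
    decomm_nodes:
      - controller-0.example.com
      - controller-1.example.com
      - controller-2.example.com
    target_nodes:
      - cephstorage-0.example.com
      - cephstorage-1.example.com
      - cephstorage-2.example.com
  roles:
    - ceph_migrate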