From d984b14a76dfe59aee13bdb27ea943c57c4dce54 Mon Sep 17 00:00:00 2001
From: Dimitri Savineau
Date: Wed, 9 Dec 2020 14:14:15 -0500
Subject: [PATCH] ceph-releases: remove unused ceph releases

This branch only uses the nautilus release, so we don't need those files.

Signed-off-by: Dimitri Savineau
---
 .../centos/daemon-base/__ISCSI_PACKAGES__     |   1 -
 .../daemon-base/__CEPH_BASE_PACKAGES__        |  16 --
 .../daemon-base/__CEPH_MGR_PACKAGES__         |   1 -
 .../luminous/daemon/config.static.sh          | 118 ----------
 .../luminous/daemon/entrypoint.sh.in          | 204 ------------------
 .../daemon/osd_scenarios/osd_disk_activate.sh |  85 --------
 .../luminous/daemon/start_rbd_mirror.sh       |  32 ---
 .../luminous/daemon/start_restapi.sh          |  28 ---
 .../luminous/daemon/variables_entrypoint.sh   | 121 -----------
 .../centos/daemon-base/__ISCSI_PACKAGES__     |   1 -
 .../mimic/daemon-base/__CEPH_BASE_PACKAGES__  |  16 --
 .../mimic/daemon-base/__CEPH_MGR_PACKAGES__   |   1 -
 ceph-releases/mimic/daemon/config.static.sh   | 118 ----------
 .../daemon/osd_scenarios/osd_disk_activate.sh |  85 --------
 .../mimic/daemon/start_rbd_mirror.sh          |  32 ---
 .../mimic/daemon/variables_entrypoint.sh      | 116 ----------
 16 files changed, 975 deletions(-)
 delete mode 100644 ceph-releases/luminous/centos/daemon-base/__ISCSI_PACKAGES__
 delete mode 100644 ceph-releases/luminous/daemon-base/__CEPH_BASE_PACKAGES__
 delete mode 100644 ceph-releases/luminous/daemon-base/__CEPH_MGR_PACKAGES__
 delete mode 100755 ceph-releases/luminous/daemon/config.static.sh
 delete mode 100755 ceph-releases/luminous/daemon/entrypoint.sh.in
 delete mode 100644 ceph-releases/luminous/daemon/osd_scenarios/osd_disk_activate.sh
 delete mode 100755 ceph-releases/luminous/daemon/start_rbd_mirror.sh
 delete mode 100755 ceph-releases/luminous/daemon/start_restapi.sh
 delete mode 100755 ceph-releases/luminous/daemon/variables_entrypoint.sh
 delete mode 100644 ceph-releases/mimic/centos/daemon-base/__ISCSI_PACKAGES__
 delete mode 100644 ceph-releases/mimic/daemon-base/__CEPH_BASE_PACKAGES__
 delete mode 100644 ceph-releases/mimic/daemon-base/__CEPH_MGR_PACKAGES__
 delete mode 100755 ceph-releases/mimic/daemon/config.static.sh
 delete mode 100644 ceph-releases/mimic/daemon/osd_scenarios/osd_disk_activate.sh
 delete mode 100755 ceph-releases/mimic/daemon/start_rbd_mirror.sh
 delete mode 100755 ceph-releases/mimic/daemon/variables_entrypoint.sh

diff --git a/ceph-releases/luminous/centos/daemon-base/__ISCSI_PACKAGES__ b/ceph-releases/luminous/centos/daemon-base/__ISCSI_PACKAGES__
deleted file mode 100644
index d7b0e44d4..000000000
--- a/ceph-releases/luminous/centos/daemon-base/__ISCSI_PACKAGES__
+++ /dev/null
@@ -1 +0,0 @@
-tcmu-runner ceph-iscsi-config ceph-iscsi-cli python-rtslib
diff --git a/ceph-releases/luminous/daemon-base/__CEPH_BASE_PACKAGES__ b/ceph-releases/luminous/daemon-base/__CEPH_BASE_PACKAGES__
deleted file mode 100644
index 3b112f80b..000000000
--- a/ceph-releases/luminous/daemon-base/__CEPH_BASE_PACKAGES__
+++ /dev/null
@@ -1,16 +0,0 @@
-\
- ca-certificates \
- e2fsprogs \
- ceph-common__ENV_[CEPH_POINT_RELEASE]__ \
- ceph-mon__ENV_[CEPH_POINT_RELEASE]__ \
- ceph-osd__ENV_[CEPH_POINT_RELEASE]__ \
- ceph-mds__ENV_[CEPH_POINT_RELEASE]__ \
- rbd-mirror__ENV_[CEPH_POINT_RELEASE]__ \
- __CEPH_MGR_PACKAGES__\
- kmod \
- lvm2 \
- gdisk \
- __RADOSGW_PACKAGES__ \
- __GANESHA_PACKAGES__ \
- __ISCSI_PACKAGES__ \
- __CSI_PACKAGES__
diff --git a/ceph-releases/luminous/daemon-base/__CEPH_MGR_PACKAGES__ b/ceph-releases/luminous/daemon-base/__CEPH_MGR_PACKAGES__
deleted file mode 100644
index 1ce4f9d7e..000000000
---
a/ceph-releases/luminous/daemon-base/__CEPH_MGR_PACKAGES__ +++ /dev/null @@ -1 +0,0 @@ -ceph-mgr__ENV_[CEPH_POINT_RELEASE]__ diff --git a/ceph-releases/luminous/daemon/config.static.sh b/ceph-releases/luminous/daemon/config.static.sh deleted file mode 100755 index f039d35be..000000000 --- a/ceph-releases/luminous/daemon/config.static.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash -set -e - -function get_admin_key { - # No-op for static - log "static: does not generate the admin key, so we can not get it." - log "static: make it available with the help of your configuration management system." - log "static: ceph-ansible is a good candidate to deploy a containerized version of Ceph." - log "static: ceph-ansible will help you fetching the keys and push them on the right nodes." - log "static: if you're interested, please visit: https://github.com/ceph/ceph-ansible" -} - -function get_mon_config { - # IPv4 is the default unless we specify it - IP_LEVEL=${1:-4} - - if [ ! -e /etc/ceph/"${CLUSTER}".conf ]; then - local fsid - fsid=$(uuidgen) - if [[ "$CEPH_DAEMON" == demo ]]; then - fsid=$(uuidgen) - cat </etc/ceph/"${CLUSTER}".conf -[global] -fsid = $fsid -mon initial members = ${MON_NAME} -mon host = ${MON_IP} -osd crush chooseleaf type = 0 -osd journal size = 100 -public network = ${CEPH_PUBLIC_NETWORK} -cluster network = ${CEPH_PUBLIC_NETWORK} -log file = /dev/null -osd pool default size = 1 -ENDHERE - - # For ext4 - if [ "$(findmnt -n -o FSTYPE -T /var/lib/ceph)" = "ext4" ]; then - cat <> /etc/ceph/"${CLUSTER}".conf -osd max object name len = 256 -osd max object namespace len = 64 -ENDHERE - fi - else - cat </etc/ceph/"${CLUSTER}".conf -[global] -fsid = $fsid -mon initial members = ${MON_NAME} -mon host = ${MON_IP} -public network = ${CEPH_PUBLIC_NETWORK} -cluster network = ${CEPH_CLUSTER_NETWORK} -osd journal size = ${OSD_JOURNAL_SIZE} -log file = /dev/null -ENDHERE - fi - if [ "$IP_LEVEL" -eq 6 ]; then - echo "ms bind ipv6 = true" >> /etc/ceph/"${CLUSTER}".conf - fi - else - # extract fsid from ceph.conf - fsid=$(grep "fsid" /etc/ceph/"${CLUSTER}".conf | awk '{print $NF}') - fi - - CLI+=("--set-uid=0") - - if [ ! -e "$ADMIN_KEYRING" ]; then - if [ -z "$ADMIN_SECRET" ]; then - # Automatically generate administrator key - CLI+=(--gen-key) - else - # Generate custom provided administrator key - CLI+=("--add-key=$ADMIN_SECRET") - fi - ceph-authtool "$ADMIN_KEYRING" --create-keyring -n client.admin "${CLI[@]}" --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *' - fi - - if [ ! -e "$MON_KEYRING" ]; then - # Generate the mon. key - ceph-authtool "$MON_KEYRING" --create-keyring --gen-key -n mon. --cap mon 'allow *' - fi - - if [ ! -e "$OSD_BOOTSTRAP_KEYRING" ]; then - # Generate the OSD bootstrap key - ceph-authtool "$OSD_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-osd --cap mon 'allow profile bootstrap-osd' - fi - - if [ ! -e "$MDS_BOOTSTRAP_KEYRING" ]; then - # Generate the MDS bootstrap key - ceph-authtool "$MDS_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-mds --cap mon 'allow profile bootstrap-mds' - fi - - if [ ! -e "$RGW_BOOTSTRAP_KEYRING" ]; then - # Generate the RGW bootstrap key - ceph-authtool "$RGW_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-rgw --cap mon 'allow profile bootstrap-rgw' - fi - - if [ ! 
-e "$RBD_MIRROR_BOOTSTRAP_KEYRING" ]; then - # Generate the RBD Mirror bootstrap key - ceph-authtool "$RBD_MIRROR_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-rbd --cap mon 'allow profile bootstrap-rbd' - fi - # Apply proper permissions to the keys - chown "${CHOWN_OPT[@]}" ceph. "$MON_KEYRING" "$OSD_BOOTSTRAP_KEYRING" "$MDS_BOOTSTRAP_KEYRING" "$RGW_BOOTSTRAP_KEYRING" "$RBD_MIRROR_BOOTSTRAP_KEYRING" - - if [ ! -e "$MONMAP" ]; then - if [ -e /etc/ceph/monmap ]; then - # Rename old monmap - mv /etc/ceph/monmap "$MONMAP" - else - # Generate initial monitor map - monmaptool --create --add "${MON_NAME}" "${MON_IP}:6789" --fsid "${fsid}" "$MONMAP" - fi - chown "${CHOWN_OPT[@]}" ceph. "$MONMAP" - fi -} - -function get_config { - # No-op for static - log "static: does not generate config" -} - diff --git a/ceph-releases/luminous/daemon/entrypoint.sh.in b/ceph-releases/luminous/daemon/entrypoint.sh.in deleted file mode 100755 index 4e9b95f81..000000000 --- a/ceph-releases/luminous/daemon/entrypoint.sh.in +++ /dev/null @@ -1,204 +0,0 @@ -#!/bin/bash - -# We need the -m to track child process in docker_exec.sh -# It is expected to receive some SIGCHLD, so -m is mandatory -set -me -export LC_ALL=C - -source /opt/ceph-container/bin/variables_entrypoint.sh -source /opt/ceph-container/bin/common_functions.sh -source /opt/ceph-container/bin/docker_exec.sh -source /opt/ceph-container/bin/debug.sh - -########################### -# CONFIGURATION GENERATOR # -########################### - -# Load in the bootstrapping routines -# based on the data store -case "$KV_TYPE" in - etcd) - # TAG: kv_type_etcd - source /opt/ceph-container/bin/config.kv.etcd.sh - ;; - k8s|kubernetes) - # TAG: kv_type_k8s - source /opt/ceph-container/bin/config.k8s.sh - ;; - *) - source /opt/ceph-container/bin/config.static.sh - ;; -esac - - -############### -# CEPH_DAEMON # -############### - -# Normalize DAEMON to lowercase -CEPH_DAEMON=$(to_lowercase "${CEPH_DAEMON}") - -create_mandatory_directories - -if [[ ! 
"x86_64 aarch64" =~ $CEPH_ARCH ]] ; then - echo "$CEPH_DAEMON is not supported on $CEPH_ARCH" >&2 - exit 1 -fi - -# If we are given a valid first argument, set the -# CEPH_DAEMON variable from it -case "$CEPH_DAEMON" in - populate_kvstore) - # TAG: populate_kvstore - source /opt/ceph-container/bin/populate_kv.sh - populate_kv - ;; - mon) - # TAG: mon - source /opt/ceph-container/bin/start_mon.sh - start_mon - ;; - osd) - # TAG: osd - source /opt/ceph-container/bin/start_osd.sh - start_osd - ;; - osd_directory) - # TAG: osd_directory - source /opt/ceph-container/bin/start_osd.sh - OSD_TYPE="directory" - start_osd - ;; - osd_directory_single) - # TAG: osd_directory_single - source /opt/ceph-container/bin/start_osd.sh - OSD_TYPE="directory_single" - start_osd - ;; - osd_ceph_disk) - # TAG: osd_ceph_disk - source /opt/ceph-container/bin/start_osd.sh - OSD_TYPE="disk" - start_osd - ;; - osd_ceph_disk_prepare) - # TAG: osd_ceph_disk_prepare - source /opt/ceph-container/bin/start_osd.sh - OSD_TYPE="prepare" - start_osd - ;; - osd_ceph_disk_activate) - # TAG: osd_ceph_disk_activate - source /opt/ceph-container/bin/start_osd.sh - OSD_TYPE="activate" - start_osd - ;; - osd_ceph_activate_journal) - # TAG: osd_ceph_activate_journal - source /opt/ceph-container/bin/start_osd.sh - OSD_TYPE="activate_journal" - start_osd - ;; - osd_ceph_volume_activate) - ami_privileged - # shellcheck disable=SC1091 - # TAG: osd_ceph_volume_activate - source /opt/ceph-container/bin/osd_volume_activate.sh - osd_volume_activate - ;; - mds) - # TAG: mds - source /opt/ceph-container/bin/start_mds.sh - start_mds - ;; - rgw) - # TAG: rgw - source /opt/ceph-container/bin/start_rgw.sh - start_rgw - ;; - rgw_user) - # TAG: rgw_user - source /opt/ceph-container/bin/start_rgw.sh - create_rgw_user - ;; - restapi) - # TAG: restapi - source /opt/ceph-container/bin/start_restapi.sh - start_restapi - ;; - rbd_mirror) - # TAG: rbd_mirror - source /opt/ceph-container/bin/start_rbd_mirror.sh - start_rbd_mirror - ;; - nfs) - # TAG: nfs - source /opt/ceph-container/bin/start_nfs.sh - start_nfs - ;; - zap_device) - # TAG: zap_device - source /opt/ceph-container/bin/zap_device.sh - zap_device - ;; - mon_health) - # TAG: mon_health - source /opt/ceph-container/bin/watch_mon_health.sh - watch_mon_health - ;; - mgr) - # TAG: mgr - source /opt/ceph-container/bin/start_mgr.sh - start_mgr - ;; - disk_introspection) - # TAG: disk_introspection - if [[ "$KV_TYPE" =~ k8s|kubernetes ]]; then - source /opt/ceph-container/bin/disk_introspection.sh - else - log "You can not use the disk introspection method outside a Kubernetes environment" - log "Make sure KV_TYPE equals either k8s or kubernetes" - fi - ;; - demo) - # TAG: demo - source /opt/ceph-container/bin/demo.sh - ;; - disk_list) - # TAG: disk_list - source /opt/ceph-container/bin/disk_list.sh - start_disk_list - ;; - tcmu_runner) - # TAG: tcmu_runner - if is_redhat; then - source /opt/ceph-container/bin/start_tcmu_runner.sh - start_tcmu_runner - else - log "ERROR: tcmu_runner scenario is only available on Red Hat systems." - fi - ;; - rbd_target_api) - # TAG: rbd_target_api - if is_redhat; then - source /opt/ceph-container/bin/start_rbd_target_api.sh - start_rbd_target_api - else - log "ERROR: rbd_target_api scenario is only available on Red Hat systems." - fi - ;; - rbd_target_gw) - # TAG: rbd_target_gw - if is_redhat; then - source /opt/ceph-container/bin/start_rbd_target_gw.sh - start_rbd_target_gw - else - log "ERROR: rbd_target_gw scenario is only available on Red Hat systems." 
- fi - ;; - *) - invalid_ceph_daemon - ;; -esac - -exit 0 diff --git a/ceph-releases/luminous/daemon/osd_scenarios/osd_disk_activate.sh b/ceph-releases/luminous/daemon/osd_scenarios/osd_disk_activate.sh deleted file mode 100644 index faeb8c561..000000000 --- a/ceph-releases/luminous/daemon/osd_scenarios/osd_disk_activate.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash -# shellcheck disable=SC2034 -set -e -source /opt/ceph-container/bin/disk_list.sh - -function osd_activate { - if [[ -z "${OSD_DEVICE}" ]] || [[ ! -b "${OSD_DEVICE}" ]]; then - log "ERROR: you either provided a non-existing device or no device at all." - log "You must provide a device to build your OSD ie: /dev/sdb" - exit 1 - fi - - CEPH_DISK_OPTIONS=() - - if [[ ${OSD_FILESTORE} -eq 1 ]] && [[ ${OSD_DMCRYPT} -eq 0 ]]; then - if [[ -n "${OSD_JOURNAL}" ]]; then - CLI+=("${OSD_JOURNAL}") - else - CLI+=("${OSD_DEVICE}") - fi - export DISK_LIST_SEARCH=journal - start_disk_list - JOURNAL_PART=$(start_disk_list) - unset DISK_LIST_SEARCH - JOURNAL_UUID=$(get_part_uuid "${JOURNAL_PART}") - fi - - # creates /dev/mapper/ for dmcrypt - # usually after a reboot they don't go created - udevadm trigger - - # watch the udev event queue, and exit if all current events are handled - udevadm settle --timeout=600 - - DATA_PART=$(dev_part "${OSD_DEVICE}" 1) - MOUNTED_PART=${DATA_PART} - - if [[ ${OSD_DMCRYPT} -eq 1 ]] && [[ ${OSD_FILESTORE} -eq 1 ]]; then - get_dmcrypt_filestore_uuid - mount_lockbox "$DATA_UUID" "$LOCKBOX_UUID" - CEPH_DISK_OPTIONS+=('--dmcrypt') - MOUNTED_PART="/dev/mapper/${DATA_UUID}" - open_encrypted_parts_filestore - elif [[ ${OSD_DMCRYPT} -eq 1 ]] && [[ ${OSD_BLUESTORE} -eq 1 ]]; then - get_dmcrypt_bluestore_uuid - mount_lockbox "$DATA_UUID" "$LOCKBOX_UUID" - CEPH_DISK_OPTIONS+=('--dmcrypt') - MOUNTED_PART="/dev/mapper/${DATA_UUID}" - open_encrypted_parts_bluestore - fi - - if [[ -z "${CEPH_DISK_OPTIONS[*]}" ]]; then - ceph-disk -v --setuser ceph --setgroup disk activate --no-start-daemon "${DATA_PART}" - else - ceph-disk -v --setuser ceph --setgroup disk activate "${CEPH_DISK_OPTIONS[@]}" --no-start-daemon "${DATA_PART}" - fi - - actual_part=$(readlink -f "${MOUNTED_PART}") - OSD_ID=$(grep "${actual_part}" /proc/mounts | awk '{print $2}' | sed -r 's/^.*-([0-9]+)$/\1/') - - if [[ ${OSD_BLUESTORE} -eq 1 ]]; then - # Get the device used for block db and wal otherwise apply_ceph_ownership_to_disks will fail - OSD_BLUESTORE_BLOCK_DB_TMP=$(resolve_symlink "${OSD_PATH}block.db") -# shellcheck disable=SC2034 - OSD_BLUESTORE_BLOCK_DB=${OSD_BLUESTORE_BLOCK_DB_TMP%?} -# shellcheck disable=SC2034 - OSD_BLUESTORE_BLOCK_WAL_TMP=$(resolve_symlink "${OSD_PATH}block.wal") -# shellcheck disable=SC2034 - OSD_BLUESTORE_BLOCK_WAL=${OSD_BLUESTORE_BLOCK_WAL_TMP%?} - fi - apply_ceph_ownership_to_disks - - log "SUCCESS" - # This ensures all resources have been unmounted after the OSD has exited - # We define `sigterm_cleanup_post` here because: - # - we want to 'protect' the following `exec` in particular. - # - having the cleaning code just next to the concerned function in the same file is nice. 
- function sigterm_cleanup_post { - local osd_mnt - osd_mnt=$(df --output=target | grep '/var/lib/ceph/osd/') - log "osd_disk_activate: Unmounting $osd_mnt" - umount "$osd_mnt" || (log "osd_disk_activate: Failed to umount $osd_mnt"; lsof "$osd_mnt") - } - exec /usr/bin/ceph-osd "${CLI_OPTS[@]}" -f -i "${OSD_ID}" --setuser ceph --setgroup disk -} diff --git a/ceph-releases/luminous/daemon/start_rbd_mirror.sh b/ceph-releases/luminous/daemon/start_rbd_mirror.sh deleted file mode 100755 index d535bfa41..000000000 --- a/ceph-releases/luminous/daemon/start_rbd_mirror.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -e - -function start_rbd_mirror { - get_config - check_config - - if [ "${CEPH_GET_ADMIN_KEY}" -eq 1 ]; then - # ensure we have the admin key - get_admin_key - check_admin_key - fi - - if [ ! -e "$RBD_MIRROR_KEYRING" ]; then - - if [ ! -e "$RBD_MIRROR_BOOTSTRAP_KEYRING" ]; then - log "ERROR- $RBD_MIRROR_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rbd -o $RBD_MIRROR_BOOTSTRAP_KEYRING'" - exit 1 - fi - - ceph_health client.bootstrap-rbd "$RBD_MIRROR_BOOTSTRAP_KEYRING" - - # Generate the rbd mirror key - ceph "${CLI_OPTS[@]}" --name client.bootstrap-rbd --keyring "$RBD_MIRROR_BOOTSTRAP_KEYRING" auth get-or-create client.rbd-mirror."${RBD_MIRROR_NAME}" mon 'profile rbd' osd 'profile rbd' -o "$RBD_MIRROR_KEYRING" - chown "${CHOWN_OPT[@]}" ceph. "$RBD_MIRROR_KEYRING" - chmod 0600 "$RBD_MIRROR_KEYRING" - fi - - log "SUCCESS" - # start rbd-mirror - exec /usr/bin/rbd-mirror "${DAEMON_OPTS[@]}" -n client.rbd-mirror."${RBD_MIRROR_NAME}" -} diff --git a/ceph-releases/luminous/daemon/start_restapi.sh b/ceph-releases/luminous/daemon/start_restapi.sh deleted file mode 100755 index 2704f938d..000000000 --- a/ceph-releases/luminous/daemon/start_restapi.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -set -e - -function start_restapi { - get_config - check_config - - # Ensure we have the admin key - get_admin_key - check_admin_key - - # Check to see if we need to add a [client.restapi] section; add, if necessary - if ! grep -qE "\[client.restapi\]" /etc/ceph/"${CLUSTER}".conf; then - cat <>/etc/ceph/"${CLUSTER}".conf - -[client.restapi] - public addr = ${RESTAPI_IP}:${RESTAPI_PORT} - restapi base url = ${RESTAPI_BASE_URL} - restapi log level = ${RESTAPI_LOG_LEVEL} - log file = ${RESTAPI_LOG_FILE} -ENDHERE - fi - - log "SUCCESS" - - # start ceph-rest-api - exec /usr/bin/ceph-rest-api "${CLI_OPTS[@]}" -n client.admin -} diff --git a/ceph-releases/luminous/daemon/variables_entrypoint.sh b/ceph-releases/luminous/daemon/variables_entrypoint.sh deleted file mode 100755 index 291fb8d5d..000000000 --- a/ceph-releases/luminous/daemon/variables_entrypoint.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/bash - - -################################### -# LIST OF ALL SCENARIOS AVAILABLE # -################################### - -ALL_SCENARIOS="populate_kvstore mon osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_disk_prepare osd_ceph_disk_activate osd_ceph_activate_journal mds rgw rgw_user restapi nfs zap_device mon_health mgr disk_introspection demo disk_list tcmu_runner rbd_target_api rbd_target_gw" - - -######################### -# LIST OF ALL VARIABLES # -######################### - -HOSTNAME=$(uname -n | cut -d'.' 
-f1) -HOST_FQDN=$(/dev/null; then - PYTHON=python -else - PYTHON=python3 -fi - -# Internal variables -MDS_KEYRING=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring -ADMIN_KEYRING=/etc/ceph/${CLUSTER}.client.admin.keyring -MON_KEYRING=/etc/ceph/${CLUSTER}.mon.keyring -RGW_KEYRING=/var/lib/ceph/radosgw/${CLUSTER}-rgw.${RGW_NAME}/keyring -MDS_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring -RGW_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring -OSD_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring -RBD_MIRROR_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-rbd/${CLUSTER}.keyring -OSD_PATH_BASE=/var/lib/ceph/osd/${CLUSTER} -MONMAP=/etc/ceph/monmap-${CLUSTER} -MGR_KEYRING=/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}/keyring -RBD_MIRROR_KEYRING=/etc/ceph/${CLUSTER}.client.rbd-mirror.${HOSTNAME}.keyring diff --git a/ceph-releases/mimic/centos/daemon-base/__ISCSI_PACKAGES__ b/ceph-releases/mimic/centos/daemon-base/__ISCSI_PACKAGES__ deleted file mode 100644 index d7b0e44d4..000000000 --- a/ceph-releases/mimic/centos/daemon-base/__ISCSI_PACKAGES__ +++ /dev/null @@ -1 +0,0 @@ -tcmu-runner ceph-iscsi-config ceph-iscsi-cli python-rtslib diff --git a/ceph-releases/mimic/daemon-base/__CEPH_BASE_PACKAGES__ b/ceph-releases/mimic/daemon-base/__CEPH_BASE_PACKAGES__ deleted file mode 100644 index 3b112f80b..000000000 --- a/ceph-releases/mimic/daemon-base/__CEPH_BASE_PACKAGES__ +++ /dev/null @@ -1,16 +0,0 @@ -\ - ca-certificates \ - e2fsprogs \ - ceph-common__ENV_[CEPH_POINT_RELEASE]__ \ - ceph-mon__ENV_[CEPH_POINT_RELEASE]__ \ - ceph-osd__ENV_[CEPH_POINT_RELEASE]__ \ - ceph-mds__ENV_[CEPH_POINT_RELEASE]__ \ - rbd-mirror__ENV_[CEPH_POINT_RELEASE]__ \ - __CEPH_MGR_PACKAGES__\ - kmod \ - lvm2 \ - gdisk \ - __RADOSGW_PACKAGES__ \ - __GANESHA_PACKAGES__ \ - __ISCSI_PACKAGES__ \ - __CSI_PACKAGES__ diff --git a/ceph-releases/mimic/daemon-base/__CEPH_MGR_PACKAGES__ b/ceph-releases/mimic/daemon-base/__CEPH_MGR_PACKAGES__ deleted file mode 100644 index 1ce4f9d7e..000000000 --- a/ceph-releases/mimic/daemon-base/__CEPH_MGR_PACKAGES__ +++ /dev/null @@ -1 +0,0 @@ -ceph-mgr__ENV_[CEPH_POINT_RELEASE]__ diff --git a/ceph-releases/mimic/daemon/config.static.sh b/ceph-releases/mimic/daemon/config.static.sh deleted file mode 100755 index f039d35be..000000000 --- a/ceph-releases/mimic/daemon/config.static.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash -set -e - -function get_admin_key { - # No-op for static - log "static: does not generate the admin key, so we can not get it." - log "static: make it available with the help of your configuration management system." - log "static: ceph-ansible is a good candidate to deploy a containerized version of Ceph." - log "static: ceph-ansible will help you fetching the keys and push them on the right nodes." - log "static: if you're interested, please visit: https://github.com/ceph/ceph-ansible" -} - -function get_mon_config { - # IPv4 is the default unless we specify it - IP_LEVEL=${1:-4} - - if [ ! 
-e /etc/ceph/"${CLUSTER}".conf ]; then - local fsid - fsid=$(uuidgen) - if [[ "$CEPH_DAEMON" == demo ]]; then - fsid=$(uuidgen) - cat </etc/ceph/"${CLUSTER}".conf -[global] -fsid = $fsid -mon initial members = ${MON_NAME} -mon host = ${MON_IP} -osd crush chooseleaf type = 0 -osd journal size = 100 -public network = ${CEPH_PUBLIC_NETWORK} -cluster network = ${CEPH_PUBLIC_NETWORK} -log file = /dev/null -osd pool default size = 1 -ENDHERE - - # For ext4 - if [ "$(findmnt -n -o FSTYPE -T /var/lib/ceph)" = "ext4" ]; then - cat <> /etc/ceph/"${CLUSTER}".conf -osd max object name len = 256 -osd max object namespace len = 64 -ENDHERE - fi - else - cat </etc/ceph/"${CLUSTER}".conf -[global] -fsid = $fsid -mon initial members = ${MON_NAME} -mon host = ${MON_IP} -public network = ${CEPH_PUBLIC_NETWORK} -cluster network = ${CEPH_CLUSTER_NETWORK} -osd journal size = ${OSD_JOURNAL_SIZE} -log file = /dev/null -ENDHERE - fi - if [ "$IP_LEVEL" -eq 6 ]; then - echo "ms bind ipv6 = true" >> /etc/ceph/"${CLUSTER}".conf - fi - else - # extract fsid from ceph.conf - fsid=$(grep "fsid" /etc/ceph/"${CLUSTER}".conf | awk '{print $NF}') - fi - - CLI+=("--set-uid=0") - - if [ ! -e "$ADMIN_KEYRING" ]; then - if [ -z "$ADMIN_SECRET" ]; then - # Automatically generate administrator key - CLI+=(--gen-key) - else - # Generate custom provided administrator key - CLI+=("--add-key=$ADMIN_SECRET") - fi - ceph-authtool "$ADMIN_KEYRING" --create-keyring -n client.admin "${CLI[@]}" --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *' - fi - - if [ ! -e "$MON_KEYRING" ]; then - # Generate the mon. key - ceph-authtool "$MON_KEYRING" --create-keyring --gen-key -n mon. --cap mon 'allow *' - fi - - if [ ! -e "$OSD_BOOTSTRAP_KEYRING" ]; then - # Generate the OSD bootstrap key - ceph-authtool "$OSD_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-osd --cap mon 'allow profile bootstrap-osd' - fi - - if [ ! -e "$MDS_BOOTSTRAP_KEYRING" ]; then - # Generate the MDS bootstrap key - ceph-authtool "$MDS_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-mds --cap mon 'allow profile bootstrap-mds' - fi - - if [ ! -e "$RGW_BOOTSTRAP_KEYRING" ]; then - # Generate the RGW bootstrap key - ceph-authtool "$RGW_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-rgw --cap mon 'allow profile bootstrap-rgw' - fi - - if [ ! -e "$RBD_MIRROR_BOOTSTRAP_KEYRING" ]; then - # Generate the RBD Mirror bootstrap key - ceph-authtool "$RBD_MIRROR_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-rbd --cap mon 'allow profile bootstrap-rbd' - fi - # Apply proper permissions to the keys - chown "${CHOWN_OPT[@]}" ceph. "$MON_KEYRING" "$OSD_BOOTSTRAP_KEYRING" "$MDS_BOOTSTRAP_KEYRING" "$RGW_BOOTSTRAP_KEYRING" "$RBD_MIRROR_BOOTSTRAP_KEYRING" - - if [ ! -e "$MONMAP" ]; then - if [ -e /etc/ceph/monmap ]; then - # Rename old monmap - mv /etc/ceph/monmap "$MONMAP" - else - # Generate initial monitor map - monmaptool --create --add "${MON_NAME}" "${MON_IP}:6789" --fsid "${fsid}" "$MONMAP" - fi - chown "${CHOWN_OPT[@]}" ceph. 
"$MONMAP" - fi -} - -function get_config { - # No-op for static - log "static: does not generate config" -} - diff --git a/ceph-releases/mimic/daemon/osd_scenarios/osd_disk_activate.sh b/ceph-releases/mimic/daemon/osd_scenarios/osd_disk_activate.sh deleted file mode 100644 index faeb8c561..000000000 --- a/ceph-releases/mimic/daemon/osd_scenarios/osd_disk_activate.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash -# shellcheck disable=SC2034 -set -e -source /opt/ceph-container/bin/disk_list.sh - -function osd_activate { - if [[ -z "${OSD_DEVICE}" ]] || [[ ! -b "${OSD_DEVICE}" ]]; then - log "ERROR: you either provided a non-existing device or no device at all." - log "You must provide a device to build your OSD ie: /dev/sdb" - exit 1 - fi - - CEPH_DISK_OPTIONS=() - - if [[ ${OSD_FILESTORE} -eq 1 ]] && [[ ${OSD_DMCRYPT} -eq 0 ]]; then - if [[ -n "${OSD_JOURNAL}" ]]; then - CLI+=("${OSD_JOURNAL}") - else - CLI+=("${OSD_DEVICE}") - fi - export DISK_LIST_SEARCH=journal - start_disk_list - JOURNAL_PART=$(start_disk_list) - unset DISK_LIST_SEARCH - JOURNAL_UUID=$(get_part_uuid "${JOURNAL_PART}") - fi - - # creates /dev/mapper/ for dmcrypt - # usually after a reboot they don't go created - udevadm trigger - - # watch the udev event queue, and exit if all current events are handled - udevadm settle --timeout=600 - - DATA_PART=$(dev_part "${OSD_DEVICE}" 1) - MOUNTED_PART=${DATA_PART} - - if [[ ${OSD_DMCRYPT} -eq 1 ]] && [[ ${OSD_FILESTORE} -eq 1 ]]; then - get_dmcrypt_filestore_uuid - mount_lockbox "$DATA_UUID" "$LOCKBOX_UUID" - CEPH_DISK_OPTIONS+=('--dmcrypt') - MOUNTED_PART="/dev/mapper/${DATA_UUID}" - open_encrypted_parts_filestore - elif [[ ${OSD_DMCRYPT} -eq 1 ]] && [[ ${OSD_BLUESTORE} -eq 1 ]]; then - get_dmcrypt_bluestore_uuid - mount_lockbox "$DATA_UUID" "$LOCKBOX_UUID" - CEPH_DISK_OPTIONS+=('--dmcrypt') - MOUNTED_PART="/dev/mapper/${DATA_UUID}" - open_encrypted_parts_bluestore - fi - - if [[ -z "${CEPH_DISK_OPTIONS[*]}" ]]; then - ceph-disk -v --setuser ceph --setgroup disk activate --no-start-daemon "${DATA_PART}" - else - ceph-disk -v --setuser ceph --setgroup disk activate "${CEPH_DISK_OPTIONS[@]}" --no-start-daemon "${DATA_PART}" - fi - - actual_part=$(readlink -f "${MOUNTED_PART}") - OSD_ID=$(grep "${actual_part}" /proc/mounts | awk '{print $2}' | sed -r 's/^.*-([0-9]+)$/\1/') - - if [[ ${OSD_BLUESTORE} -eq 1 ]]; then - # Get the device used for block db and wal otherwise apply_ceph_ownership_to_disks will fail - OSD_BLUESTORE_BLOCK_DB_TMP=$(resolve_symlink "${OSD_PATH}block.db") -# shellcheck disable=SC2034 - OSD_BLUESTORE_BLOCK_DB=${OSD_BLUESTORE_BLOCK_DB_TMP%?} -# shellcheck disable=SC2034 - OSD_BLUESTORE_BLOCK_WAL_TMP=$(resolve_symlink "${OSD_PATH}block.wal") -# shellcheck disable=SC2034 - OSD_BLUESTORE_BLOCK_WAL=${OSD_BLUESTORE_BLOCK_WAL_TMP%?} - fi - apply_ceph_ownership_to_disks - - log "SUCCESS" - # This ensures all resources have been unmounted after the OSD has exited - # We define `sigterm_cleanup_post` here because: - # - we want to 'protect' the following `exec` in particular. - # - having the cleaning code just next to the concerned function in the same file is nice. 
- function sigterm_cleanup_post { - local osd_mnt - osd_mnt=$(df --output=target | grep '/var/lib/ceph/osd/') - log "osd_disk_activate: Unmounting $osd_mnt" - umount "$osd_mnt" || (log "osd_disk_activate: Failed to umount $osd_mnt"; lsof "$osd_mnt") - } - exec /usr/bin/ceph-osd "${CLI_OPTS[@]}" -f -i "${OSD_ID}" --setuser ceph --setgroup disk -} diff --git a/ceph-releases/mimic/daemon/start_rbd_mirror.sh b/ceph-releases/mimic/daemon/start_rbd_mirror.sh deleted file mode 100755 index d535bfa41..000000000 --- a/ceph-releases/mimic/daemon/start_rbd_mirror.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -e - -function start_rbd_mirror { - get_config - check_config - - if [ "${CEPH_GET_ADMIN_KEY}" -eq 1 ]; then - # ensure we have the admin key - get_admin_key - check_admin_key - fi - - if [ ! -e "$RBD_MIRROR_KEYRING" ]; then - - if [ ! -e "$RBD_MIRROR_BOOTSTRAP_KEYRING" ]; then - log "ERROR- $RBD_MIRROR_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rbd -o $RBD_MIRROR_BOOTSTRAP_KEYRING'" - exit 1 - fi - - ceph_health client.bootstrap-rbd "$RBD_MIRROR_BOOTSTRAP_KEYRING" - - # Generate the rbd mirror key - ceph "${CLI_OPTS[@]}" --name client.bootstrap-rbd --keyring "$RBD_MIRROR_BOOTSTRAP_KEYRING" auth get-or-create client.rbd-mirror."${RBD_MIRROR_NAME}" mon 'profile rbd' osd 'profile rbd' -o "$RBD_MIRROR_KEYRING" - chown "${CHOWN_OPT[@]}" ceph. "$RBD_MIRROR_KEYRING" - chmod 0600 "$RBD_MIRROR_KEYRING" - fi - - log "SUCCESS" - # start rbd-mirror - exec /usr/bin/rbd-mirror "${DAEMON_OPTS[@]}" -n client.rbd-mirror."${RBD_MIRROR_NAME}" -} diff --git a/ceph-releases/mimic/daemon/variables_entrypoint.sh b/ceph-releases/mimic/daemon/variables_entrypoint.sh deleted file mode 100755 index 490af2705..000000000 --- a/ceph-releases/mimic/daemon/variables_entrypoint.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/bash - - -################################### -# LIST OF ALL SCENARIOS AVAILABLE # -################################### - -ALL_SCENARIOS="populate_kvstore mon osd osd_directory osd_directory_single osd_ceph_disk osd_ceph_disk_prepare osd_ceph_disk_activate osd_ceph_activate_journal mds rgw rgw_user nfs zap_device mon_health mgr disk_introspection demo disk_list tcmu_runner rbd_target_api rbd_target_gw" - - -######################### -# LIST OF ALL VARIABLES # -######################### - -HOSTNAME=$(uname -n | cut -d'.' -f1) -HOST_FQDN=$(/dev/null; then - PYTHON=python -else - PYTHON=python3 -fi - -# Internal variables -MDS_KEYRING=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring -ADMIN_KEYRING=/etc/ceph/${CLUSTER}.client.admin.keyring -MON_KEYRING=/etc/ceph/${CLUSTER}.mon.keyring -RGW_KEYRING=/var/lib/ceph/radosgw/${CLUSTER}-rgw.${RGW_NAME}/keyring -MDS_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring -RGW_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring -OSD_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring -RBD_MIRROR_BOOTSTRAP_KEYRING=/var/lib/ceph/bootstrap-rbd/${CLUSTER}.keyring -OSD_PATH_BASE=/var/lib/ceph/osd/${CLUSTER} -MONMAP=/etc/ceph/monmap-${CLUSTER} -MGR_KEYRING=/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}/keyring -RBD_MIRROR_KEYRING=/etc/ceph/${CLUSTER}.client.rbd-mirror.${HOSTNAME}.keyring