#!/bin/bash
# ideas - read config file that defines all envs, all vms, etc ...
# rather than having to hardcode them
# currently - each ceph cluster gets a section in the case statement below
# format:
# <ceph cluster>)
# vms="VM1 VM2 VM3 VM4...."
# node01="VM1"
#
# node01 is used to ssh and ceph -s
# matching hosts should be set up in /etc/hosts for ssh access
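#
# a minimal sketch of the config-file idea above (hypothetical path
# ~/.start-ceph.conf, not read by this script yet) - one
# "<env>|<node01>|<vm list>" line per cluster, e.g.:
#   ceph5|ceph-node01|ceph-node01 ceph-node02 ceph-node03 ceph-client
# the case statement below could then become a lookup like:
#   entry=$(grep "^$2|" ~/.start-ceph.conf)
#   node01=$(echo "$entry" | cut -d'|' -f2)
#   vms=$(echo "$entry" | cut -d'|' -f3)
# matching /etc/hosts entries would look like (placeholder addresses):
#   192.168.122.11  ceph-node01
#   192.168.122.12  ceph-node02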
if [ "$1" == "" ]; then
echo "Usage : start-ceph <start|stop|destroy> <ceph4|ceph5|ceph4-79|rhcs5>"
echo "rhcs3 rhcs51 rhcs50 ceph5 ceph4-79 ceph4 rhcs4-rhel79 rhel87-cluster1"
echo "checking for running cephs or cluster"
virsh list |egrep 'rhcs|ceph|rhel'
exit;
fi
# $1 is the requested action; it does not always map directly to a virsh subcommand
case $1 in
start|status)
action="$1"
;;
stop)
action="shutdown"
;;
destroy)
action="destroy"
;;
ssh)
action="ssh"
;;
*)
echo "action required : start,status,stop,destroy"
exit
;;
esac
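# example invocations (matching the actions handled above):
#   start-ceph start ceph5        - virsh start every vm in the ceph5 env
#   start-ceph status ceph5       - ssh to node01 and run 'ceph -s'
#   start-ceph ssh ceph5 'uptime' - run a command on every vm in the env
#   start-ceph destroy ceph5      - hard-stop the vms and wait until none remain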
# set vms and node01 for the selected environment
case "$2" in
rhcs52)
vms="rhcs52-admin rhcs52-node01 rhcs52-node02 rhcs52-node03 "
node01="rhcs52-node01"
;;
rhcs3)
vms="rhcs3-admin rhcs3-node01 rhcs3-node02 rhcs3-node03 "
node01="rhcs3-node01"
;;
ceph5)
vms="ceph-node01 ceph-node02 ceph-node03 ceph-client "
node01="ceph-node01"
;;
ceph4)
vms="ceph4-node01 ceph4-node02 ceph4-node03 ceph4-admin "
node01="ceph4-node01"
;;
ceph4-rhel79)
vms="ceph4-79-node01 ceph4-79-node02 ceph4-79-node03 ceph4-79-admin "
node01="ceph4-rhel79-node01"
;;
rhcs4-rhel79)
vms="rhcs4-rhel79-node01 rhcs4-rhel79-node02 rhcs4-rhel79-node03 rhcs4-rhel79-admin "
node01="rhcs4-rhel79-node01"
;;
rhcs50)
vms="rhcs50-admin rhcs50-node01 rhcs50-node02 rhcs50-node03 "
node01="rhcs50-node01"
;;
rhcs51)
vms="rhcs51-admin rhcs51-node01 rhcs51-node02 rhcs51-node03 "
node01="rhcs51-node01"
;;
rhel87-cluster1)
# pacemaker cluster example - two lines to configure:
# vms should list all cluster nodes; node01 is the "master" node (naming left over from ceph)
vms="rhel87-cluster1-node1 rhel87-cluster1-node2 rhel87-cluster1-node3 "
node01="rhel87-cluster1-node1"
;;
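# template for adding a new environment (placeholder names - copy and edit):
#   mycluster)
#   vms="mycluster-node01 mycluster-node02 mycluster-node03 "
#   node01="mycluster-node01"
#   ;;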
*)
echo "No ceph environement specified" # <<<<< again, edit to match your setup, "ceph" could be "cluster"
echo "rhcs51 rhcs50 ceph5 ceph4-79 ceph4 rhcs4-rhel79"
echo "checking for running cephs "
virsh list |egrep 'rhcs|ceph|rhel'
exit;
;;
esac
if [ "$action" == "ssh" ]; then
for i in $vms; do
echo "Running command '$3' on node $i"
ssh -x $i "$3"
echo
done
else
# special case - left over from ceph; for pacemaker environments this could
# check whether $2 contains "cluster" and run `pcs status --full` instead
# (see the sketch after this block)
if [ "$action" == "status" ]; then
echo "Checking ceph status on $node01"
ssh $node01 ceph -s
else
for i in $vms; do
virsh $action $i
done
fi
fi
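# sketch of the pacemaker special case mentioned above (assumption: pcs is
# installed on $node01; untested):
#   if [[ "$2" == *cluster* ]]; then
#   ssh $node01 pcs status --full
#   else
#   ssh $node01 ceph -s
#   fi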
if [[ "$action" == "shutdown" ]] || [[ "$action" == "destroy" ]]; then
echo "looping while waiting for shutdown"
# build an alternation pattern (vm1|vm2|...) from the vm list;
# word splitting on the unquoted $vms drops the trailing space
pattern=$(echo $vms | sed 's/ /|/g')
while true; do
date
# exit once none of the vms appear in the running list
if ! virsh list | grep -E "$pattern"; then
exit
fi
sleep 15
done
fi
#virsh $action ceph-node01
#virsh $action ceph-node02
#virsh $action ceph-node03
#virsh $action ceph-client