From 656b80aa1c47dce1e1e3ae489db15a4d73f85fc6 Mon Sep 17 00:00:00 2001
From: sebgoa
Date: Wed, 28 Sep 2016 10:57:11 +0200
Subject: [PATCH] Commit first pass of playbook

---
 README.md                                  | 50 ++++++++++
 ansible.cfg                                |  3 +
 boot.yml                                   | 37 ++++++++
 inventory                                  | 18 ++++
 k8s.yml                                    | 18 ++++
 roles/common/tasks/create_sshkey.yml       | 18 ++++
 roles/common/tasks/main.yml                |  2 +
 roles/k8s-remove/tasks/delete_context.yml  | 13 +++
 roles/k8s-remove/tasks/delete_inv.yml      |  4 +
 roles/k8s-remove/tasks/delete_secgroup.yml |  7 ++
 .../tasks/delete_secgroup_rules.yml        | 95 +++++++++++++++++++
 roles/k8s-remove/tasks/delete_vm.yml       | 12 +++
 roles/k8s-remove/tasks/main.yml            |  6 ++
 roles/k8s/tasks/create_context.yml         | 14 +++
 roles/k8s/tasks/create_inv.yml             |  5 +
 roles/k8s/tasks/create_secgroup.yml        |  7 ++
 roles/k8s/tasks/create_secgroup_rules.yml  | 70 ++++++++++++++
 roles/k8s/tasks/create_vm.yml              | 24 +++++
 roles/k8s/tasks/main.yml                   |  6 ++
 roles/k8s/templates/inventory.j2           | 20 ++++
 roles/k8s/templates/k8s-master.j2          | 23 +++++
 roles/k8s/templates/k8s-node.j2            | 23 +++++
 22 files changed, 475 insertions(+)
 create mode 100644 README.md
 create mode 100644 ansible.cfg
 create mode 100644 boot.yml
 create mode 100644 inventory
 create mode 100644 k8s.yml
 create mode 100644 roles/common/tasks/create_sshkey.yml
 create mode 100644 roles/common/tasks/main.yml
 create mode 100644 roles/k8s-remove/tasks/delete_context.yml
 create mode 100644 roles/k8s-remove/tasks/delete_inv.yml
 create mode 100644 roles/k8s-remove/tasks/delete_secgroup.yml
 create mode 100644 roles/k8s-remove/tasks/delete_secgroup_rules.yml
 create mode 100644 roles/k8s-remove/tasks/delete_vm.yml
 create mode 100644 roles/k8s-remove/tasks/main.yml
 create mode 100644 roles/k8s/tasks/create_context.yml
 create mode 100644 roles/k8s/tasks/create_inv.yml
 create mode 100644 roles/k8s/tasks/create_secgroup.yml
 create mode 100644 roles/k8s/tasks/create_secgroup_rules.yml
 create mode 100644 roles/k8s/tasks/create_vm.yml
 create mode 100644 roles/k8s/tasks/main.yml
 create mode 100644 roles/k8s/templates/inventory.j2
 create mode 100644 roles/k8s/templates/k8s-master.j2
 create mode 100644 roles/k8s/templates/k8s-node.j2

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..47a99af
--- /dev/null
+++ b/README.md
@@ -0,0 +1,50 @@
+Ansible playbook to install a development Kubernetes (k8s) cluster using kubeadm on CentOS in CloudStack
+========================================================================================================
+
+
+Prerequisites
+-------------
+
+You will need Ansible >= 2.0, sshpubkeys, and [cs](https://github.com/exoscale/cs):
+
+    $ sudo apt-get install -y python-pip
+    $ sudo pip install ansible
+    $ sudo pip install cs
+    $ sudo pip install sshpubkeys
+
+Set up CloudStack
+-----------------
+
+Create a `~/.cloudstack.ini` file with your credentials and CloudStack endpoint, for example:
+
+    [cloudstack]
+    endpoint = https://api.exoscale.ch/compute
+    key =
+    secret =
+    method = post
+
+The HTTP POST method is needed to pass the user data to the CentOS instances.
+
+Create a Kubernetes cluster
+---------------------------
+
+    $ ansible-playbook k8s.yml
+
+Some variables can be edited in the `k8s.yml` file.
+This will start a Kubernetes master node and a number of compute nodes.
+
+Check the tasks and templates in `roles/k8s`.
+
+Bootstrap with `kubeadm`
+------------------------
+
+    $ ansible-playbook boot.yml
+
+Then:
+
+    $ ansible master -a "kubectl get nodes"
+    master_node | SUCCESS | rc=0 >>
+    NAME          STATUS    AGE
+    kube-head     Ready     44s
+    kube-node-1   Ready     5s
+    kube-node-2   Ready     5s
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 0000000..85bed06
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,3 @@
+[defaults]
+hostfile = ./inventory
+host_key_checking = False
diff --git a/boot.yml b/boot.yml
new file mode 100644
index 0000000..b010750
--- /dev/null
+++ b/boot.yml
@@ -0,0 +1,37 @@
+---
+- hosts: master
+  vars:
+    init_token: foobar.1234
+  tasks:
+    - name: Disable SELinux
+      selinux: state=disabled
+      become: true
+    - name: Start Docker
+      service: enabled=yes state=started name=docker
+      become: true
+    - name: Start kubelet
+      service: enabled=yes state=started name=kubelet
+      become: true
+    - name: Run kubeadm init on the master
+      command: kubeadm init --token {{ init_token }}
+      become: true
+
+- hosts: nodes
+  vars:
+    init_token: foobar.1234
+  tags: nodes
+  tasks:
+    - name: Disable SELinux
+      selinux: state=disabled
+      become: true
+    - name: Start Docker
+      service: enabled=yes state=started name=docker
+      become: true
+    - name: Start kubelet
+      service: enabled=yes state=started name=kubelet
+      become: true
+    - name: Debug master_node IP
+      debug: msg="master node IP is {{ hostvars['master_node'].ansible_default_ipv4.address }}"
+    - name: Run kubeadm join on the nodes
+      command: kubeadm join --token {{ init_token }} {{ hostvars['master_node'].ansible_default_ipv4.address }}
+      become: true
diff --git a/inventory b/inventory
new file mode 100644
index 0000000..8ca8d80
--- /dev/null
+++ b/inventory
@@ -0,0 +1,18 @@
+[default]
+# localhost ansible_python_interpreter=/opt/local/Library/Frameworks/Python.framework/Versions/2.7/bin/python
+
+[master]
+master_node ansible_host=185.19.30.97 ansible_ssh_private_key_file=~/.ssh/id_rsa_k8s ansible_ssh_user=centos
+
+[nodes]
+185.19.30.212
+185.19.30.213
+
+[allnodes]
+185.19.30.97
+185.19.30.212
+185.19.30.213
+
+[nodes:vars]
+ansible_ssh_user=centos
+ansible_ssh_private_key_file=~/.ssh/id_rsa_k8s
diff --git a/k8s.yml b/k8s.yml
new file mode 100644
index 0000000..7864452
--- /dev/null
+++ b/k8s.yml
@@ -0,0 +1,18 @@
+---
+- hosts: localhost
+  connection: local
+  vars:
+    ssh_key: k8s
+    k8s_version: 1.4.0
+    k8s_num_nodes: 2
+    k8s_security_group_name: k8s
+    k8s_node_prefix: kube
+    k8s_username: foobar
+    k8s_password: FdKPSuwQ
+# This template name is specific to http://exoscale.ch, replace it
+    k8s_template: Linux CentOS 7.1 64-bit 50G Disk (2015-03-31-2230b6)
+    k8s_instance_type: Tiny
+
+  roles:
+    - common
+    - k8s
diff --git a/roles/common/tasks/create_sshkey.yml b/roles/common/tasks/create_sshkey.yml
new file mode 100644
index 0000000..2b3f917
--- /dev/null
+++ b/roles/common/tasks/create_sshkey.yml
@@ -0,0 +1,18 @@
+ - name: Create SSH Key
+   local_action:
+     module: cs_sshkeypair
+     name: "{{ ssh_key }}"
+   register: key
+   tags: sshkey
+
+ - debug: msg='private key is {{ key.private_key }}'
+   when: key.changed
+   tags: sshkey
+
+ - local_action: copy content="{{ key.private_key }}" dest="~/.ssh/id_rsa_{{ ssh_key }}"
+   when: key.changed
+   tags: sshkey
+
+ - file: path="~/.ssh/id_rsa_{{ ssh_key }}" mode=0600
+   when: key.changed
+   tags: sshkey
diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml
new file mode 100644
index 0000000..df885bc
--- /dev/null
+++ b/roles/common/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+ - include: create_sshkey.yml
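As a quick sketch of a typical session with the two playbooks above (the `-e` override value is illustrative only; extra vars take precedence over the play-level `vars` in `k8s.yml`, and the worker play in `boot.yml` carries `tags: nodes`, so it can be re-run on its own):

    $ ansible-playbook k8s.yml -e "k8s_num_nodes=4"
    $ ansible-playbook boot.yml
    $ ansible-playbook boot.yml --tags nodes    # re-run only the worker-node play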
diff --git a/roles/k8s-remove/tasks/delete_context.yml b/roles/k8s-remove/tasks/delete_context.yml
new file mode 100644
index 0000000..6074005
--- /dev/null
+++ b/roles/k8s-remove/tasks/delete_context.yml
@@ -0,0 +1,13 @@
+# Delete the k8s context
+
+ - name: Unset cluster
+   command: kubectl config unset clusters.exo
+   tags: context
+
+ - name: Unset context
+   command: kubectl config unset contexts.exo
+   tags: context
+
+ - name: Unset current context
+   command: kubectl config unset current-context
+   tags: context
diff --git a/roles/k8s-remove/tasks/delete_inv.yml b/roles/k8s-remove/tasks/delete_inv.yml
new file mode 100644
index 0000000..30fe3d4
--- /dev/null
+++ b/roles/k8s-remove/tasks/delete_inv.yml
@@ -0,0 +1,4 @@
+# Remove the inventory file
+
+ - name: Remove old inventory
+   file: path=./inventory state=absent
diff --git a/roles/k8s-remove/tasks/delete_secgroup.yml b/roles/k8s-remove/tasks/delete_secgroup.yml
new file mode 100644
index 0000000..fd7dad7
--- /dev/null
+++ b/roles/k8s-remove/tasks/delete_secgroup.yml
@@ -0,0 +1,7 @@
+# Delete the k8s security group
+
+ - name: Remove k8s Security Group
+   local_action:
+     module: cs_securitygroup
+     name: "{{ k8s_security_group_name }}"
+     state: absent
diff --git a/roles/k8s-remove/tasks/delete_secgroup_rules.yml b/roles/k8s-remove/tasks/delete_secgroup_rules.yml
new file mode 100644
index 0000000..15e8757
--- /dev/null
+++ b/roles/k8s-remove/tasks/delete_secgroup_rules.yml
@@ -0,0 +1,95 @@
+# Remove rule from the etcd security group
+
+# - name: etcd access for k8s
+#   local_action:
+#     module: cs_securitygroup_rule
+#     name: "{{ etcd_security_group_name }}"
+#     start_port: 4001
+#     end_port: 4001
+#     user_security_group: "{{ k8s_security_group_name }}"
+
+# Rules for the k8s security group
+
+ - name: Remove SSH for k8s
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 22
+     end_port: 22
+     state: absent
+
+
+#etcd
+ - name: Remove etcd 4001
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 4001
+     end_port: 4001
+     user_security_group: "{{ k8s_security_group_name }}"
+     state: absent
+
+ - name: Remove etcd 7001
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 7001
+     end_port: 7001
+     user_security_group: "{{ k8s_security_group_name }}"
+     state: absent
+
+ - name: Remove etcd 2379
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 2379
+     end_port: 2379
+     user_security_group: "{{ k8s_security_group_name }}"
+     state: absent
+
+ - name: Remove etcd 2380
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 2380
+     end_port: 2380
+     user_security_group: "{{ k8s_security_group_name }}"
+     state: absent
+
+ - name: Remove flannel UDP 8472
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 8472
+     end_port: 8472
+     protocol: udp
+     user_security_group: "{{ k8s_security_group_name }}"
+     state: absent
+
+ - name: Remove secure 443
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 443
+     end_port: 443
+     protocol: tcp
+     user_security_group: "{{ k8s_security_group_name }}"
+     state: absent
+
+#k8s api
+ - name: Remove k8s 8080
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 8080
+     end_port: 8080
+     user_security_group: "{{ k8s_security_group_name }}"
+     state: absent
+
+ - name: Remove k8s public 8080
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 8080
+     end_port: 8080
+     state: absent
diff --git a/roles/k8s-remove/tasks/delete_vm.yml b/roles/k8s-remove/tasks/delete_vm.yml
new file mode 100644
index 0000000..a0f0406
--- /dev/null
+++ b/roles/k8s-remove/tasks/delete_vm.yml
@@ -0,0 +1,12 @@
+ - name: Remove k8s head node
+   local_action:
+     module: cs_instance
+     name: "{{ k8s_node_prefix }}-head"
+     state: absent
+
+ - name: Remove k8s nodes
+   local_action:
+     module: cs_instance
+     name: "{{ k8s_node_prefix }}-node-{{ item }}"
+     state: absent
+   with_sequence: count={{ k8s_num_nodes }}
diff --git a/roles/k8s-remove/tasks/main.yml b/roles/k8s-remove/tasks/main.yml
new file mode 100644
index 0000000..ca37f16
--- /dev/null
+++ b/roles/k8s-remove/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+ - include: delete_vm.yml
+ - include: delete_inv.yml
+ - include: delete_secgroup_rules.yml
+ - include: delete_secgroup.yml
+ - include: delete_context.yml
diff --git a/roles/k8s/tasks/create_context.yml b/roles/k8s/tasks/create_context.yml
new file mode 100644
index 0000000..0b99870
--- /dev/null
+++ b/roles/k8s/tasks/create_context.yml
@@ -0,0 +1,14 @@
+# Create the k8s context
+
+ - name: Set context cluster
+   command: kubectl config set-cluster exo --server=https://{{ k8s_master.default_ip }}:443 --insecure-skip-tls-verify=true
+   tags: context
+
+ - name: Set context user
+   command: kubectl config set-credentials exo --username={{ k8s_username }} --password={{ k8s_password }}
+
+ - name: Create context
+   command: kubectl config set-context exo --cluster=exo --user=exo
+
+ - name: Use context
+   command: kubectl config use-context exo
diff --git a/roles/k8s/tasks/create_inv.yml b/roles/k8s/tasks/create_inv.yml
new file mode 100644
index 0000000..a9d49d1
--- /dev/null
+++ b/roles/k8s/tasks/create_inv.yml
@@ -0,0 +1,5 @@
+# Create the inventory file
+
+ - name: Create inventory file
+   template: src=inventory.j2 dest=./inventory
+   tags: inventory
diff --git a/roles/k8s/tasks/create_secgroup.yml b/roles/k8s/tasks/create_secgroup.yml
new file mode 100644
index 0000000..b6bd96c
--- /dev/null
+++ b/roles/k8s/tasks/create_secgroup.yml
@@ -0,0 +1,7 @@
+# Create the k8s security group
+
+ - name: Create k8s Security Group
+   local_action:
+     module: cs_securitygroup
+     name: "{{ k8s_security_group_name }}"
+     description: k8s
diff --git a/roles/k8s/tasks/create_secgroup_rules.yml b/roles/k8s/tasks/create_secgroup_rules.yml
new file mode 100644
index 0000000..342e097
--- /dev/null
+++ b/roles/k8s/tasks/create_secgroup_rules.yml
@@ -0,0 +1,70 @@
+# Add rule to the etcd security group
+
+# - name: etcd access for k8s
+#   local_action:
+#     module: cs_securitygroup_rule
+#     name: "{{ etcd_security_group_name }}"
+#     start_port: 4001
+#     end_port: 4001
+#     user_security_group: "{{ k8s_security_group_name }}"
+
+# Rules for the k8s security group
+
+# internal
+ - name: flannel etcd 2379
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 2379
+     end_port: 2379
+     user_security_group: "{{ k8s_security_group_name }}"
+
+ - name: flannel etcd 2380
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 2380
+     end_port: 2380
+     user_security_group: "{{ k8s_security_group_name }}"
+
+ - name: k8s 8080
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 8080
+     end_port: 8080
+     user_security_group: "{{ k8s_security_group_name }}"
+
+ - name: flannel UDP 8472
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 8472
+     end_port: 8472
+     protocol: udp
+     user_security_group: "{{ k8s_security_group_name }}"
+
+ - name: kubelet logs and exec TCP 10250
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 10250
+     end_port: 10250
+     protocol: tcp
+     user_security_group: "{{ k8s_security_group_name }}"
+
+# external access
+
+ - name: SSH for k8s
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 22
+     end_port: 22
+
+ - name: k8s public secure
+   local_action:
+     module: cs_securitygroup_rule
+     security_group: "{{ k8s_security_group_name }}"
+     start_port: 443
+     end_port: 443
diff --git a/roles/k8s/tasks/create_vm.yml b/roles/k8s/tasks/create_vm.yml
new file mode 100644
index 0000000..fe26711
--- /dev/null
+++ b/roles/k8s/tasks/create_vm.yml
@@ -0,0 +1,24 @@
+ - name: Start k8s head node
+   local_action:
+     module: cs_instance
+     name: "{{ k8s_node_prefix }}-head"
+     template: "{{ k8s_template }}"
+     service_offering: "{{ k8s_instance_type }}"
+     ssh_key: "{{ ssh_key }}"
+     security_groups: [ '{{ k8s_security_group_name }}' ]
+     user_data: "{{ lookup('template', '../templates/k8s-master.j2') }}"
+   register: k8s_master
+
+ - debug: msg='k8s master IP is {{ k8s_master.default_ip }}'
+
+ - name: Start k8s nodes
+   local_action:
+     module: cs_instance
+     name: "{{ k8s_node_prefix }}-node-{{ item }}"
+     template: "{{ k8s_template }}"
+     service_offering: "{{ k8s_instance_type }}"
+     ssh_key: "{{ ssh_key }}"
+     security_groups: [ '{{ k8s_security_group_name }}' ]
+     user_data: "{{ lookup('template', '../templates/k8s-node.j2') }}"
+   with_sequence: count={{ k8s_num_nodes }}
+   register: k8s_nodes
diff --git a/roles/k8s/tasks/main.yml b/roles/k8s/tasks/main.yml
new file mode 100644
index 0000000..fbb017d
--- /dev/null
+++ b/roles/k8s/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+ - include: create_secgroup.yml
+ - include: create_secgroup_rules.yml
+ - include: create_vm.yml
+ - include: create_inv.yml
+ - include: create_context.yml
diff --git a/roles/k8s/templates/inventory.j2 b/roles/k8s/templates/inventory.j2
new file mode 100644
index 0000000..3a38db0
--- /dev/null
+++ b/roles/k8s/templates/inventory.j2
@@ -0,0 +1,20 @@
+[default]
+# localhost ansible_python_interpreter=/opt/local/Library/Frameworks/Python.framework/Versions/2.7/bin/python
+
+[master]
+master_node ansible_host={{ k8s_master.default_ip }} ansible_ssh_private_key_file=~/.ssh/id_rsa_k8s ansible_ssh_user=centos
+
+[nodes]
+{% for item in k8s_nodes.results %}
+{{ item.default_ip }}
+{% endfor %}
+
+[allnodes]
+{{ k8s_master.default_ip }}
+{% for item in k8s_nodes.results %}
+{{ item.default_ip }}
+{% endfor %}
+
+[nodes:vars]
+ansible_ssh_user=centos
+ansible_ssh_private_key_file=~/.ssh/id_rsa_{{ ssh_key }}
diff --git a/roles/k8s/templates/k8s-master.j2 b/roles/k8s/templates/k8s-master.j2
new file mode 100644
index 0000000..f9f4c2c
--- /dev/null
+++ b/roles/k8s/templates/k8s-master.j2
@@ -0,0 +1,23 @@
+#cloud-config
+write_files:
+  - path: /etc/yum.repos.d/kubernetes.repo
+    content: |
+      [kubernetes]
+      name=Kubernetes
+      baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
+      enabled=1
+      gpgcheck=1
+      gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+             https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+package_update: true
+
+packages:
+  - docker
+  - kubelet
+  - kubeadm
+  - kubectl
+  - kubernetes-cni
+
+power_state:
+  mode: reboot
diff --git a/roles/k8s/templates/k8s-node.j2 b/roles/k8s/templates/k8s-node.j2
new file mode 100644
index 0000000..f9f4c2c
--- /dev/null
+++ b/roles/k8s/templates/k8s-node.j2
@@ -0,0 +1,23 @@
+#cloud-config
+write_files:
+  - path: /etc/yum.repos.d/kubernetes.repo
+    content: |
+      [kubernetes]
+      name=Kubernetes
+      baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
+      enabled=1
+      gpgcheck=1
+      gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+             https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+package_update: true
+
+packages:
+  - docker
+  - kubelet
+  - kubeadm
+  - kubectl
+  - kubernetes-cni
+
+power_state:
+  mode: reboot
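The patch adds a `roles/k8s-remove` role for tearing the cluster back down, but no playbook calls it yet. A minimal sketch of what such a playbook could look like, assuming a `remove.yml` at the repository root that mirrors the vars used in `k8s.yml` (the filename and the variable values are assumptions, not part of this commit):

    ---
    # remove.yml -- hypothetical teardown playbook driving roles/k8s-remove
    - hosts: localhost
      connection: local
      vars:
        k8s_num_nodes: 2              # must match the number of nodes created by k8s.yml
        k8s_security_group_name: k8s
        k8s_node_prefix: kube
      roles:
        - k8s-remove

It would be run with `ansible-playbook remove.yml` once the instances are no longer needed.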