From 6fd7afb6b8b4f105bb1399944abe997f46105e4f Mon Sep 17 00:00:00 2001 From: Jaskaranbir Date: Sun, 30 Sep 2018 03:12:16 -0400 Subject: [PATCH] Initial Commit --- .gitignore | 3 + LICENSE | 13 + README.md | 54 ++++ Vagrantfile | 235 ++++++++++++++++++ ansible.cfg | 22 ++ group_vars/all.yml | 47 ++++ inventory/hosts.yml | 25 ++ kubernetes.yml | 63 +++++ macros/recursive_template_copy.yml | 25 ++ macros/template_copy.yml | 16 ++ roles/certs/defaults/main.yml | 2 + roles/certs/tasks/ca.yml | 53 ++++ roles/certs/tasks/main.yml | 19 ++ roles/certs/tasks/master-certs.yml | 43 ++++ roles/certs/tasks/worker-certs.yml | 43 ++++ roles/certs/templates/etcd-openssl.cnf.j2 | 22 ++ roles/certs/templates/master-openssl.cnf.j2 | 22 ++ roles/certs/templates/worker-openssl.cnf.j2 | 15 ++ roles/cni/defaults/main.yml | 2 + roles/cni/tasks/main.yml | 38 +++ roles/cni/templates/00-weave.conflist.j2 | 18 ++ roles/distribute-certs/tasks/main.yml | 6 + roles/etcd/defaults/main.yml | 6 + roles/etcd/tasks/main.yml | 32 +++ roles/etcd/templates/etcd.service.j2 | 22 ++ roles/etcd/templates/etcd.yml.j2 | 24 ++ roles/kube-addons/defaults/main.yml | 15 ++ roles/kube-addons/tasks/main.yml | 71 ++++++ .../templates/configs/haproxy.cfg.j2 | 66 +++++ .../core-dns/coredns-clusterrole.yml.j2 | 21 ++ .../coredns-clusterrolebinding.yml.j2 | 20 ++ .../templates/core-dns/coredns-config.yml.j2 | 24 ++ .../core-dns/coredns-deployment.yml.j2 | 100 ++++++++ .../templates/core-dns/coredns-sa.yml.j2 | 11 + .../templates/core-dns/coredns-svc.yml.j2 | 31 +++ .../dashboard-clusterrolebinding.yml.j2 | 17 ++ .../dashboard/dashboard-configmap.yaml.j2 | 12 + .../dashboard/dashboard-controller.yaml.j2 | 74 ++++++ .../dashboard/dashboard-secret.yaml.j2 | 13 + .../dashboard/dashboard-service.yaml.j2 | 18 ++ .../templates/haproxy/haproxy.yml.j2 | 40 +++ roles/kube-master/defaults/main.yml | 50 ++++ roles/kube-master/tasks/main.yml | 41 +++ .../templates/configs/config-kubelet.yaml.j2 | 21 ++ .../templates/configs/config-proxy.yaml.j2 | 12 + .../configs/config-scheduler.yaml.j2 | 9 + .../kubeconfig-controller-manager.yaml.j2 | 21 ++ .../configs/kubeconfig-kubelet.yaml.j2 | 21 ++ .../configs/kubeconfig-proxy.yaml.j2 | 21 ++ .../configs/kubeconfig-scheduler.yaml.j2 | 21 ++ .../manifests/docker-compose/kubelet.yml.j2 | 41 +++ .../kubernetes/kube-apiserver.yaml.j2 | 90 +++++++ .../kube-controller-manager.yaml.j2 | 70 ++++++ .../manifests/kubernetes/kube-proxy.yaml.j2 | 76 ++++++ .../kubernetes/kube-scheduler.yaml.j2 | 66 +++++ .../templates/services/kubelet.service.j2 | 18 ++ roles/kube-worker/defaults/main.yml | 33 +++ roles/kube-worker/tasks/main.yml | 41 +++ .../templates/configs/config-kubelet.yaml.j2 | 21 ++ .../templates/configs/config-proxy.yaml.j2 | 12 + .../configs/kubeconfig-kubelet.yaml.j2 | 21 ++ .../configs/kubeconfig-proxy.yaml.j2 | 21 ++ .../manifests/docker-compose/kubelet.yml.j2 | 40 +++ .../kubernetes/kube-addon-manager.yaml.j2 | 40 +++ .../manifests/kubernetes/kube-proxy.yaml.j2 | 76 ++++++ .../templates/services/kubelet.service.j2 | 18 ++ roles/kubectl/defaults/main.yml | 4 + roles/kubectl/tasks/config-kubectl.yml | 66 +++++ roles/kubectl/tasks/install-kubectl.yml | 21 ++ roles/kubectl/tasks/main.yml | 8 + 70 files changed, 2403 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100644 Vagrantfile create mode 100644 ansible.cfg create mode 100644 group_vars/all.yml create mode 100644 inventory/hosts.yml create mode 100644 kubernetes.yml create mode 100644 
macros/recursive_template_copy.yml create mode 100644 macros/template_copy.yml create mode 100644 roles/certs/defaults/main.yml create mode 100644 roles/certs/tasks/ca.yml create mode 100644 roles/certs/tasks/main.yml create mode 100644 roles/certs/tasks/master-certs.yml create mode 100644 roles/certs/tasks/worker-certs.yml create mode 100644 roles/certs/templates/etcd-openssl.cnf.j2 create mode 100644 roles/certs/templates/master-openssl.cnf.j2 create mode 100644 roles/certs/templates/worker-openssl.cnf.j2 create mode 100644 roles/cni/defaults/main.yml create mode 100644 roles/cni/tasks/main.yml create mode 100644 roles/cni/templates/00-weave.conflist.j2 create mode 100644 roles/distribute-certs/tasks/main.yml create mode 100644 roles/etcd/defaults/main.yml create mode 100644 roles/etcd/tasks/main.yml create mode 100644 roles/etcd/templates/etcd.service.j2 create mode 100644 roles/etcd/templates/etcd.yml.j2 create mode 100644 roles/kube-addons/defaults/main.yml create mode 100644 roles/kube-addons/tasks/main.yml create mode 100644 roles/kube-addons/templates/configs/haproxy.cfg.j2 create mode 100644 roles/kube-addons/templates/core-dns/coredns-clusterrole.yml.j2 create mode 100644 roles/kube-addons/templates/core-dns/coredns-clusterrolebinding.yml.j2 create mode 100644 roles/kube-addons/templates/core-dns/coredns-config.yml.j2 create mode 100644 roles/kube-addons/templates/core-dns/coredns-deployment.yml.j2 create mode 100644 roles/kube-addons/templates/core-dns/coredns-sa.yml.j2 create mode 100644 roles/kube-addons/templates/core-dns/coredns-svc.yml.j2 create mode 100644 roles/kube-addons/templates/dashboard/dashboard-clusterrolebinding.yml.j2 create mode 100644 roles/kube-addons/templates/dashboard/dashboard-configmap.yaml.j2 create mode 100644 roles/kube-addons/templates/dashboard/dashboard-controller.yaml.j2 create mode 100644 roles/kube-addons/templates/dashboard/dashboard-secret.yaml.j2 create mode 100644 roles/kube-addons/templates/dashboard/dashboard-service.yaml.j2 create mode 100644 roles/kube-addons/templates/haproxy/haproxy.yml.j2 create mode 100644 roles/kube-master/defaults/main.yml create mode 100644 roles/kube-master/tasks/main.yml create mode 100644 roles/kube-master/templates/configs/config-kubelet.yaml.j2 create mode 100644 roles/kube-master/templates/configs/config-proxy.yaml.j2 create mode 100644 roles/kube-master/templates/configs/config-scheduler.yaml.j2 create mode 100644 roles/kube-master/templates/configs/kubeconfig-controller-manager.yaml.j2 create mode 100644 roles/kube-master/templates/configs/kubeconfig-kubelet.yaml.j2 create mode 100644 roles/kube-master/templates/configs/kubeconfig-proxy.yaml.j2 create mode 100644 roles/kube-master/templates/configs/kubeconfig-scheduler.yaml.j2 create mode 100644 roles/kube-master/templates/manifests/docker-compose/kubelet.yml.j2 create mode 100644 roles/kube-master/templates/manifests/kubernetes/kube-apiserver.yaml.j2 create mode 100644 roles/kube-master/templates/manifests/kubernetes/kube-controller-manager.yaml.j2 create mode 100644 roles/kube-master/templates/manifests/kubernetes/kube-proxy.yaml.j2 create mode 100644 roles/kube-master/templates/manifests/kubernetes/kube-scheduler.yaml.j2 create mode 100644 roles/kube-master/templates/services/kubelet.service.j2 create mode 100644 roles/kube-worker/defaults/main.yml create mode 100644 roles/kube-worker/tasks/main.yml create mode 100644 roles/kube-worker/templates/configs/config-kubelet.yaml.j2 create mode 100644 roles/kube-worker/templates/configs/config-proxy.yaml.j2 
create mode 100644 roles/kube-worker/templates/configs/kubeconfig-kubelet.yaml.j2
create mode 100644 roles/kube-worker/templates/configs/kubeconfig-proxy.yaml.j2
create mode 100644 roles/kube-worker/templates/manifests/docker-compose/kubelet.yml.j2
create mode 100644 roles/kube-worker/templates/manifests/kubernetes/kube-addon-manager.yaml.j2
create mode 100644 roles/kube-worker/templates/manifests/kubernetes/kube-proxy.yaml.j2
create mode 100644 roles/kube-worker/templates/services/kubelet.service.j2
create mode 100644 roles/kubectl/defaults/main.yml
create mode 100644 roles/kubectl/tasks/config-kubectl.yml
create mode 100644 roles/kubectl/tasks/install-kubectl.yml
create mode 100644 roles/kubectl/tasks/main.yml

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b2f5d5d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+.history
+.vagrant
+.vscode
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..ad1989f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..f31a7d8
--- /dev/null
+++ b/README.md
@@ -0,0 +1,54 @@
+## High-Availability Multi-Node Kubernetes Cluster
+---
+
+A **completely Dockerized** multi-node Kubernetes highly-available cluster provisioned using Vagrant/Ansible, based on Kubernetes version **1.12** (still not enough fancy words for a day :smiley:)
+
+**Note**: This is not a production-ready setup. Instead, it is intended as a base/idea for one (useful when a custom setup is needed; otherwise [Kubeadm][0] does the job pretty well).
+
+### How Stuff Works
+
+#### Kubernetes
+
+* The cluster uses a multi-master and multi-worker setup (and multi-etcd, of course).
+
+* On the master-node side, everything is ordinary, as you would expect from any regular Kubernetes master.
+
+* On the worker-node side, the master-nodes are load-balanced using HAProxy, so the Kubelet connects to HAProxy's address instead of a specific master.
+
+* Yes, HAProxy runs on each of the worker-nodes instead of the master. Otherwise, a master going down would take the load-balancer down with it (not an ideal scenario).
+
+* CNI: [Weave Net][1]
+
+* DNS: [Core DNS][2]
+
+#### Vagrant
+
+* Vagrant is simply a convenient way of automatically spinning up a cluster. You can easily configure the instances in `Vagrantfile`.
+
+* Uses VirtualBox.
+
+* Default instance-count:
+```
+ETCD: 1
+Kube-Master: 1
+Kube-Worker: 2
+```
+
+* The setup is based on a custom-packed **CoreOS**-based Vagrant-image. Image-source: [Jaskaranbir/packer_coreos-ansible-python][3]
+
+* Just run `vagrant up`, and it will automatically install/run Ansible and set up a local Kubernetes cluster.
+
+TODO: Improve cluster-security.
+Suggestions welcome.
+
+### Ansible Notes
+
+* When adding/removing instances, be sure to also update the Ansible [inventory][4].
+
+* Ansible copies its templates for manifests/configs to `/etc/kubernetes`, which will contain all Kubernetes resources, including certificates.
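+
+* Each play in `kubernetes.yml` is tagged, so individual stages can be re-run without reprovisioning everything. A minimal example (run from the copied playbook directory on a guest, assuming the same Ansible install the Vagrant provisioner uses):
+
+```
+# Re-generate and redistribute only the TLS certificates
+/opt/bin/active_python/bin/ansible-playbook kubernetes.yml --tags certs,distribute-certs
+
+# Re-apply only the worker roles and the addons
+/opt/bin/active_python/bin/ansible-playbook kubernetes.yml --tags kube-worker,kube-addons
+```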
+
+ [0]: https://kubernetes.io/docs/setup/independent/install-kubeadm/
+ [1]: https://www.weave.works/oss/net/
+ [2]: https://coredns.io/
+ [3]: https://github.com/Jaskaranbir/packer_coreos-ansible-python
+ [4]: https://github.com/Jaskaranbir/k8s_ha_multinode/blob/master/inventory/hosts.yml
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000..f25fe7f
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,235 @@
+# -*- mode: ruby -*-
+# # vi: set ft=ruby :
+
+require 'fileutils'
+
+Vagrant.require_version ">= 1.6.0"
+
+# CoreOS doesn't support vboxsf and guest-additions for virtualbox
+# So we need to use NFS, and Vagrant NFS doesn't work without this
+plugin_dependencies = [
+  "vagrant-winnfsd",
+  "vagrant-hostmanager"
+]
+
+needsRestart = false
+
+# Install plugins if required
+plugin_dependencies.each do |plugin_name|
+  unless Vagrant.has_plugin? plugin_name
+    system("vagrant plugin install #{plugin_name}")
+    needsRestart = true
+    puts "#{plugin_name} installed"
+  end
+end
+
+# Restart vagrant if new plugins were installed
+if needsRestart === true
+  exec "vagrant #{ARGV.join(' ')}"
+end
+
+# Use old vb_xxx config variables when set
+def vm_gui
+  $vb_gui.nil? ? $vm_gui : $vb_gui
+end
+
+def vm_memory
+  $vb_memory.nil? ? $vm_memory : $vb_memory
+end
+
+def vm_cpus
+  $vb_cpus.nil? ? $vm_cpus : $vb_cpus
+end
+
+$vm_configs = [
+  # Defaults for config options
+  etcd_config: {
+    num_instances: 1,
+    instance_name_prefix: "etcd",
+    enable_serial_logging: false,
+
+    vm_gui: false,
+    vm_memory: 512,
+    vm_cpus: 1,
+    vb_cpuexecutioncap: 80,
+
+    user_home_path: "/home/core",
+    forwarded_ports: [],
+    shared_folders: [
+      {
+        host_path: "./",
+        guest_path: "/vagrant"
+      }
+    ]
+  },
+
+  kube_master_config: {
+    num_instances: 1,
+    instance_name_prefix: "kube-master",
+    enable_serial_logging: false,
+
+    vm_gui: false,
+    vm_memory: 2048,
+    vm_cpus: 2,
+    vb_cpuexecutioncap: 80,
+
+    user_home_path: "/home/core",
+    forwarded_ports: [],
+    shared_folders: [
+      {
+        host_path: "./",
+        guest_path: "/vagrant"
+      }
+    ]
+  },
+
+  kube_worker_config: {
+    num_instances: 2,
+    instance_name_prefix: "kube-worker",
+    enable_serial_logging: false,
+
+    vm_gui: false,
+    vm_memory: 1024,
+    vm_cpus: 2,
+    vb_cpuexecutioncap: 80,
+
+    user_home_path: "/home/core",
+    forwarded_ports: [],
+    shared_folders: [
+      {
+        host_path: "./",
+        guest_path: "/vagrant"
+      }
+    ]
+  }
+]
+
+Vagrant.configure("2") do |config|
+  # always use Vagrant's insecure key
+  config.ssh.insert_key = true
+  # no need to forward the ssh agent; private keys are copied to the VMs below
+  config.ssh.forward_agent = false
+
+  # Hostmanager
+  config.hostmanager.enabled = true
+  config.hostmanager.manage_guest = true
+  config.hostmanager.ignore_private_ip = false
+
+  config.vm.box = "jaskaranbir/coreos-ansible"
+  config.vm.boot_timeout = 500
+
+  config.vm.provider :virtualbox do |vbox|
+    # On VirtualBox, we don't have guest additions or a functional vboxsf
+    # in CoreOS, so tell Vagrant that so it can be smarter.
+    vbox.check_guest_additions = false
+    vbox.functional_vboxsf = false
+  end
+
+  # plugin conflict
+  if Vagrant.has_plugin?("vagrant-vbguest") then
+    config.vbguest.auto_update = false
+  end
+
+  # This keeps track of the total number of instances across all VMs
+  # It is dynamically incremented as the VM configs are iterated
+  vm_num_instances_offset = 0
+
+  # We need to know the total number of instances so we run Ansible
+  # only once, on the last instance.
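+  # A sketch with the defaults above: 1 etcd + 1 kube-master + 2 kube-worker
+  # instances = 4 total, so provisioning fires while defining the 4th VM
+  # (illustrative only; the loop below computes the actual count).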
+  total_instances_count = 0
+  $vm_configs.each do | vm_config |
+    vm_config.each do |_, vc|
+      total_instances_count += vc[:num_instances]
+    end
+  end
+
+  # ================= VM-specific Configurations =================
+
+  $vm_configs.each do |vm_config|
+    vm_config.each do |vm_config_name, vc|
+      (1..vc[:num_instances]).each do |i|
+        config.vm.define vm_name = "%s-%02d" % [vc[:instance_name_prefix], i] do |config|
+          vm_num_instances_offset += 1
+          config.vm.hostname = vm_name
+
+          # Serial Logging
+          if vc[:enable_serial_logging]
+            logdir = File.join(File.dirname(__FILE__), "log")
+            FileUtils.mkdir_p(logdir)
+
+            serialFile = File.join(logdir, "%s-%s-serial.txt" % [vm_name, vc[:instance_name_prefix]])
+            FileUtils.touch(serialFile)
+
+            config.vm.provider :virtualbox do |vb, override|
+              vb.customize ["modifyvm", :id, "--uart1", "0x3F8", "4"]
+              vb.customize ["modifyvm", :id, "--uartmode1", serialFile]
+            end
+          end
+
+          # VM hardware resources configurations
+          config.vm.provider :virtualbox do |vb|
+            vb.gui = vc[:vm_gui]
+            vb.memory = vc[:vm_memory]
+            vb.cpus = vc[:vm_cpus]
+            vb.customize [
+              "modifyvm", :id,
+              "--cpuexecutioncap", "#{vc[:vb_cpuexecutioncap]}"
+            ]
+          end
+
+          ip = "172.17.8.#{vm_num_instances_offset + 100}"
+          config.vm.network :private_network, ip: ip, auto_correct: true
+
+          # Port Forwarding
+          vc[:forwarded_ports].each do |port|
+            config.vm.network :forwarded_port,
+              host: port[:host_port],
+              guest: port[:guest_port],
+              auto_correct: true
+          end
+
+          # Shared folders
+          vc[:shared_folders].each_with_index do |share, i|
+            config.vm.synced_folder share[:host_path], share[:guest_path],
+              id: "core-share%02d" % vm_num_instances_offset,
+              nfs: true,
+              mount_options: ['nolock,vers=3,udp']
+          end
+
+          # Automatically set current-dir to /vagrant on vagrant ssh
+          config.vm.provision :shell,
+            inline: "echo 'cd /vagrant' >> #{vc[:user_home_path]}/.bashrc"
+
+          # Ansible 2.6+ works only when the SSH key is permission-protected.
+          # So we manually copy the SSH key and restrict its permissions.
+          config.vm.provision :shell,
+            privileged: true, inline: <<-EOF
+              mkdir -p "#{vc[:user_home_path]}/.ssh"
+              cp "/vagrant/.vagrant/machines/#{vm_name}/virtualbox/private_key" "#{vc[:user_home_path]}/.ssh/id_rsa"
+              chmod 0400 "#{vc[:user_home_path]}/.ssh/id_rsa"
+            EOF
+
+          # Run Ansible provisioning on the last instance, so it's only run once
+          if vm_num_instances_offset === total_instances_count
+            # Copy ansible directory to enable provisioning
+            config.vm.provision :shell,
+              inline: "mkdir -p -m777 /ansible",
+              privileged: true
+            config.vm.provision "file", source: "./", destination: "/ansible"
+            # The file-provisioner needs full permissions to copy files,
+            # but Ansible 2.6+ will not work unless the parent dir is write-protected.
+            config.vm.provision :shell,
+              inline: "chmod 744 /ansible",
+              privileged: true
+
+            config.vm.provision :shell,
+              inline: "cd /ansible" \
+                " && /opt/bin/active_python/bin/ansible-playbook" \
+                " kubernetes.yml -vv",
+              privileged: true
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 0000000..8517096
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,22 @@
+[defaults]
+ansible_managed = Please do not modify this file directly as it is managed by Ansible and could be overwritten.
+host_key_checking = false
+inventory = ./inventory/hosts.yml
+remote_user = core
+retry_files_enabled = false
+timeout = 30
+
+[colors]
+changed = yellow
+debug = dark gray
+deprecate = purple
+diff_add = green
+diff_lines = cyan
+diff_remove = red
+error = red
+highlight = white
+ok = green
+skip = cyan
+unreachable = red
+verbose = blue
+warn = bright purple
diff --git a/group_vars/all.yml b/group_vars/all.yml
new file mode 100644
index 0000000..48aa9e4
--- /dev/null
+++ b/group_vars/all.yml
@@ -0,0 +1,47 @@
+# Any generated resources should go into these directories
+out_dir: "{{ playbook_dir }}/out"
+host_out_dir: "{{ out_dir }}/{{ ansible_host }}"
+ca_base_resources_dir: "{{ out_dir }}/ca"
+
+# Repositories
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
+kubernetes_version: "v1.12.0"
+
+# Newly downloaded binaries will be installed in this directory
+binary_copy_path: "/opt/bin"
+docker_compose_path: "/opt/bin/docker-compose"
+docker_path: "/usr/bin"
+base_kube_dir: "/etc/kubernetes"
+
+# Config files
+kube_config_dir: "{{ base_kube_dir }}/configs"
+
+# Resources manifest-files
+manifest_dir: "{{ base_kube_dir }}/manifests"
+compose_manifest_dir: "{{ manifest_dir }}/compose"
+kube_manifest_dir: "{{ manifest_dir }}/kube"
+
+kube_pki_dir: "{{ base_kube_dir }}/pki"
+
+# Systemd service files location
+systemd_service_dir: "/etc/systemd/system"
+kube_psp_dir: "{{ kube_addons_dir }}/pod-security-policies"
+
+# Local node-port where HAProxy is running
+# as reverse-proxy to the apiserver
+haproxy_port: 6443
+# The port on master-node where APIServer is running
+apiserver_port: 443
+
+dns_service_ip: "10.3.0.10"
+dns_domain: "cluster.local"
+service_ip_range: "10.3.0.0/24"
+kube_service_network: "10.3.0.1"
+kube_pod_network: "10.2.0.0/16"
+# APIServer used for initiating setup.
+# Initially, we need a static apiserver address.
+# This static address is replaced by a HAProxy reverse-proxy
+# once the addons are set up as required.
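+# With the default Vagrant layout this renders to e.g. https://172.17.8.102:443
+# (etcd-01 gets 172.17.8.101, kube-master-01 gets 172.17.8.102); an
+# illustration only, as the real value always comes from the first master's
+# COREOS_PUBLIC_IPV4 fact.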
+first_master_host: "{{ groups['kubernetes-masters'][0] }}"
+init_apiserver_address: |
+  "https://{{ hostvars[first_master_host]['ansible_env']['COREOS_PUBLIC_IPV4'] }}:{{ apiserver_port }}"
diff --git a/inventory/hosts.yml b/inventory/hosts.yml
new file mode 100644
index 0000000..abf5942
--- /dev/null
+++ b/inventory/hosts.yml
@@ -0,0 +1,25 @@
+all:
+  vars:
+    ansible_python_interpreter: /opt/bin/python
+
+  children:
+
+    etcd:
+      hosts:
+        etcd-01:
+          ansible_ssh_private_key_file: ./.vagrant/machines/etcd-01/virtualbox/private_key
+
+    kubernetes:
+      children:
+
+        kubernetes-masters:
+          hosts:
+            kube-master-01:
+              ansible_ssh_private_key_file: ./.vagrant/machines/kube-master-01/virtualbox/private_key
+
+        kubernetes-workers:
+          hosts:
+            kube-worker-01:
+              ansible_ssh_private_key_file: ./.vagrant/machines/kube-worker-01/virtualbox/private_key
+            kube-worker-02:
+              ansible_ssh_private_key_file: ./.vagrant/machines/kube-worker-02/virtualbox/private_key
diff --git a/kubernetes.yml b/kubernetes.yml
new file mode 100644
index 0000000..bd11e40
--- /dev/null
+++ b/kubernetes.yml
@@ -0,0 +1,63 @@
+---
+# This Playbook deploys the Kubernetes cluster
+
+- name: Setup TLS certificates
+  hosts: all
+  become: true
+  run_once: true
+  roles:
+    - certs
+  tags:
+    - certs
+
+- name: Distribute TLS certificates
+  hosts: all
+  become: true
+  roles:
+    - distribute-certs
+  tags:
+    - certs
+    - distribute-certs
+
+- name: Configure etcd cluster
+  hosts: etcd
+  become: true
+  roles:
+    - etcd
+  tags:
+    - etcd
+
+- name: Configure kubectl
+  hosts: all
+  become: true
+  roles:
+    - role: kubectl
+      tags:
+        - kubectl
+
+- name: Configure Kubernetes master(s)
+  hosts: kubernetes-masters
+  become: true
+  roles:
+    - role: kube-master
+      tags:
+        - kube-master
+    - role: cni
+      tags:
+        - cni
+
+- name: Configure Kubernetes worker(s)
+  hosts: kubernetes-workers
+  become: true
+  roles:
+    - role: kube-worker
+      tags:
+        - kube-worker
+    - role: cni
+      tags:
+        - cni
+    - role: kube-addons
+      tags:
+        - kube-addons
diff --git a/macros/recursive_template_copy.yml b/macros/recursive_template_copy.yml
new file mode 100644
index 0000000..edf6786
--- /dev/null
+++ b/macros/recursive_template_copy.yml
@@ -0,0 +1,25 @@
+---
+
+- name: "Create base directory for - {{ item.name }}"
+  file:
+    path: "{{ item.dest }}"
+    state: directory
+
+- name: "Create directories for - {{ item.name }}"
+  file:
+    path: "{{ item.dest }}/{{ template_dir.path }}"
+    state: directory
+  with_filetree: "{{ item.src }}"
+  when: template_dir.state == "directory"
+  loop_control:
+    loop_var: template_dir
+
+- name: "{{ item.name }}"
+  template:
+    src: "{{ template_file.src }}"
+    dest: "{{ item.dest }}/{{ template_file.path | replace('.j2', '') }}"
+    mode: "{{ (item.mode | default(0644)) }}"
+  with_filetree: "{{ item.src }}"
+  when: template_file.state == "file"
+  loop_control:
+    loop_var: template_file
diff --git a/macros/template_copy.yml b/macros/template_copy.yml
new file mode 100644
index 0000000..4f5b22f
--- /dev/null
+++ b/macros/template_copy.yml
@@ -0,0 +1,16 @@
+---
+
+- name: "Create directories for - {{ item.name }}"
+  file:
+    path: "{{ item.dest }}"
+    state: directory
+
+- name: "{{ item.name }}"
+  template:
+    src: "{{ template_file }}"
+    # Get the filename, and remove the ".j2" extension
+    dest: "{{ item.dest }}/{{ template_file | basename | replace('.j2', '') }}"
+    mode: "{{ (item.mode | default(0644)) }}"
+  with_fileglob: "{{ item.src }}/*"
+  loop_control:
+    loop_var: template_file
diff --git a/roles/certs/defaults/main.yml b/roles/certs/defaults/main.yml
new file mode 100644
index 0000000..e8ee63b
--- /dev/null
+++ b/roles/certs/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for roles/ca
diff --git a/roles/certs/tasks/ca.yml b/roles/certs/tasks/ca.yml
new file mode 100644
index 0000000..f2eee42
--- /dev/null
+++ b/roles/certs/tasks/ca.yml
@@ -0,0 +1,53 @@
+---
+# tasks file for roles
+
+- name: Create ca directory
+  file:
+    path: "{{ ca_base_resources_dir }}"
+    state: directory
+
+- name: Create CA root key
+  command: openssl genrsa
+    -out ca-key.pem 2048
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: ca-key.pem
+
+- name: Create CA root certificate
+  command: openssl req -x509
+    -new -nodes
+    -key ca-key.pem
+    -days 10000
+    -out ca.pem
+    -subj "/CN=kube-ca"
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: ca.pem
+
+- name: Create cluster administrator key
+  command: openssl genrsa
+    -out admin-key.pem 2048
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: admin-key.pem
+
+- name: Create cluster administrator CSR
+  command: openssl req -new
+    -key admin-key.pem
+    -out admin.csr
+    -subj "/CN=kube-admin/O=system:masters"
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: admin.csr
+
+- name: Create cluster administrator certificate
+  command: openssl x509 -req
+    -in admin.csr
+    -CA ca.pem
+    -CAkey ca-key.pem
+    -CAcreateserial
+    -out admin.pem
+    -days 365
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: admin.pem
diff --git a/roles/certs/tasks/main.yml b/roles/certs/tasks/main.yml
new file mode 100644
index 0000000..b0332a5
--- /dev/null
+++ b/roles/certs/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+
+- name: Create Kubernetes pki directory
+  file:
+    path: "{{ kube_pki_dir }}"
+    state: directory
+  delegate_to: localhost
+
+- name: Setup base Certificate Authority
+  include: ca.yml
+  delegate_to: localhost
+
+- name: Generate kubernetes master certificates
+  include: master-certs.yml
+  delegate_to: localhost
+
+- name: Generate kubernetes worker certificates
+  include: worker-certs.yml
+  delegate_to: localhost
diff --git a/roles/certs/tasks/master-certs.yml b/roles/certs/tasks/master-certs.yml
new file mode 100644
index 0000000..50155b5
--- /dev/null
+++ b/roles/certs/tasks/master-certs.yml
@@ -0,0 +1,43 @@
+---
+# tasks file for roles
+
+- name: Create ca directory
+  file:
+    path: "{{ ca_base_resources_dir }}"
+    state: directory
+
+- name: Add openssl configuration for Kubernetes API server
+  template:
+    src: master-openssl.cnf.j2
+    dest: "{{ ca_base_resources_dir }}/master-openssl.cnf"
+
+- name: Create api-server key
+  command: openssl genrsa
+    -out apiserver-key.pem 2048
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: apiserver-key.pem
+
+- name: Create api-server CSR
+  command: openssl req -new
+    -key apiserver-key.pem
+    -out apiserver.csr
+    -subj "/CN=kube-apiserver/O=system:masters"
+    -config master-openssl.cnf
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: apiserver.csr
+
+- name: Create api-server certificate
+  command: openssl x509 -req
+    -in apiserver.csr
+    -CA ca.pem
+    -CAkey ca-key.pem
+    -CAcreateserial
+    -out apiserver.pem
+    -days 365
+    -extensions v3_req
+    -extfile master-openssl.cnf
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: apiserver.pem
diff --git a/roles/certs/tasks/worker-certs.yml b/roles/certs/tasks/worker-certs.yml
new file mode 100644
index 0000000..436b04b
--- /dev/null
+++ b/roles/certs/tasks/worker-certs.yml
@@ -0,0 +1,43 @@
+---
+# tasks file for roles
+
+- name: Create ca directory
+  file:
+    path: "{{ ca_base_resources_dir }}"
+    state: directory
+
+- name: Add openssl configuration for Kubernetes worker
+  template:
+    src: worker-openssl.cnf.j2
+    dest: "{{ ca_base_resources_dir }}/worker-openssl.cnf"
+
+- name: Create worker key
+  command: openssl genrsa
+    -out worker-key.pem 2048
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: worker-key.pem
+
+- name: Create worker CSR
+  command: openssl req -new
+    -key worker-key.pem
+    -out worker.csr
+    -subj "/CN=kube-worker/O=system:masters"
+    -config worker-openssl.cnf
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: worker.csr
+
+- name: Create worker certificate
+  command: openssl x509 -req
+    -in worker.csr
+    -CA ca.pem
+    -CAkey ca-key.pem
+    -CAcreateserial
+    -out worker.pem
+    -days 365
+    -extensions v3_req
+    -extfile worker-openssl.cnf
+  args:
+    chdir: "{{ ca_base_resources_dir }}"
+    # creates: worker.pem
diff --git a/roles/certs/templates/etcd-openssl.cnf.j2 b/roles/certs/templates/etcd-openssl.cnf.j2
new file mode 100644
index 0000000..9144141
--- /dev/null
+++ b/roles/certs/templates/etcd-openssl.cnf.j2
@@ -0,0 +1,22 @@
+# {{ ansible_managed }}
+
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = kubernetes
+DNS.2 = kubernetes.default
+DNS.3 = kubernetes.default.svc
+DNS.4 = kubernetes.default.svc.{{ dns_domain }}
+DNS.5 = localhost
+
+IP.1 = {{ kube_service_network }}
+IP.2 = 127.0.0.1
+{% for host in groups['kubernetes'] %}
+IP.{{ loop.index + 2 }} = {{ hostvars[host]['ansible_env']['COREOS_PUBLIC_IPV4'] }}
+{% endfor %}
diff --git a/roles/certs/templates/master-openssl.cnf.j2 b/roles/certs/templates/master-openssl.cnf.j2
new file mode 100644
index 0000000..9144141
--- /dev/null
+++ b/roles/certs/templates/master-openssl.cnf.j2
@@ -0,0 +1,22 @@
+# {{ ansible_managed }}
+
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = kubernetes
+DNS.2 = kubernetes.default
+DNS.3 = kubernetes.default.svc
+DNS.4 = kubernetes.default.svc.{{ dns_domain }}
+DNS.5 = localhost
+
+IP.1 = {{ kube_service_network }}
+IP.2 = 127.0.0.1
+{% for host in groups['kubernetes'] %}
+IP.{{ loop.index + 2 }} = {{ hostvars[host]['ansible_env']['COREOS_PUBLIC_IPV4'] }}
+{% endfor %}
diff --git a/roles/certs/templates/worker-openssl.cnf.j2 b/roles/certs/templates/worker-openssl.cnf.j2
new file mode 100644
index 0000000..57c17d6
--- /dev/null
+++ b/roles/certs/templates/worker-openssl.cnf.j2
@@ -0,0 +1,15 @@
+# {{ ansible_managed }}
+
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+subjectAltName = @alt_names
+
+[alt_names]
+IP.1 = {{ ansible_env.COREOS_PUBLIC_IPV4 }}
+IP.2 = 127.0.0.1
diff --git a/roles/cni/defaults/main.yml b/roles/cni/defaults/main.yml
new file mode 100644
index 0000000..20787dd
--- /dev/null
+++ b/roles/cni/defaults/main.yml
@@ -0,0 +1,2 @@
+cni_bin_dir: /opt/cni/bin
+cni_plugins_version: 0.7.1
diff --git a/roles/cni/tasks/main.yml b/roles/cni/tasks/main.yml
new file mode 100644
index 0000000..6f63fd7
--- /dev/null
+++ b/roles/cni/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+
+- name: Enable IP forwarding
command: sysctl net.ipv4.ip_forward=1 + +- name: Set /proc/sys/net/bridge/bridge-nf-call-iptables to 1 + command: sysctl net.bridge.bridge-nf-call-iptables=1 + +- name: Create directory for CNI binaries + file: + path: "{{ cni_bin_dir }}" + state: directory + +- name: "Add CNI plugins to {{ cni_bin_dir }}" + shell: "curl -fsSL https://github.com/containernetworking/plugins/releases/download/v{{ cni_plugins_version }}/cni-plugins-amd64-v{{ cni_plugins_version }}.tgz | tar xvz -C {{ cni_bin_dir }}" + args: + executable: /bin/bash + warn: false + +- name: Create CNI-config directory + file: + path: /etc/cni/net.d + state: directory + +# - name: Add weavenet-conflist template +# template: +# src: 00-weave.conflist.j2 +# dest: /etc/cni/net.d/00-weave.conflist + +- name: Wait for API-Server to be ready + wait_for: + timeout: 60 + +- name: Setup Weave Net + become: false + shell: "{{ binary_copy_path }}/kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d \"\n\")" + args: + executable: /bin/bash diff --git a/roles/cni/templates/00-weave.conflist.j2 b/roles/cni/templates/00-weave.conflist.j2 new file mode 100644 index 0000000..2e4b780 --- /dev/null +++ b/roles/cni/templates/00-weave.conflist.j2 @@ -0,0 +1,18 @@ +{ + "cniVersion": "0.3.0", + "name": "mynet", + "plugins": [ + { + "name": "weave", + "type": "weave-net", + "hairpinMode": true + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + }, + "snat": true + } + ] +} diff --git a/roles/distribute-certs/tasks/main.yml b/roles/distribute-certs/tasks/main.yml new file mode 100644 index 0000000..a2a0564 --- /dev/null +++ b/roles/distribute-certs/tasks/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Copy certs to {{ kube_pki_dir }} + copy: + src: "{{ ca_base_resources_dir }}/" + dest: "{{ kube_pki_dir }}" diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml new file mode 100644 index 0000000..b4c7722 --- /dev/null +++ b/roles/etcd/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for roles/ca + +etcd_image_repo : "quay.io/coreos/etcd" +etcd_version : "v3.3.8" +etcd_compose_file: "{{ compose_manifest_dir }}/etcd.yml" diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml new file mode 100644 index 0000000..f224edf --- /dev/null +++ b/roles/etcd/tasks/main.yml @@ -0,0 +1,32 @@ +--- +# tasks file for roles + +- name: Create compose-manifest directory + file: + path: "{{ compose_manifest_dir }}" + state: directory + +- name: Copy etcd compose-file to compose-manifest directory + template: + src: etcd.yml.j2 + dest: "{{ etcd_compose_file }}" + +- name: Copy etcd service-file to systemd + template: + src: etcd.service.j2 + dest: "{{ systemd_service_dir }}/etcd.service" + +- name: Start etcd service + systemd: + name: etcd + state: started + enabled: yes + daemon_reload: yes + +- name: Wait for etcd port to be open + wait_for: + host: 127.0.0.1 + port: 2379 + delay: 5 + connect_timeout: 2 + timeout: 300 diff --git a/roles/etcd/templates/etcd.service.j2 b/roles/etcd/templates/etcd.service.j2 new file mode 100644 index 0000000..5a4cd14 --- /dev/null +++ b/roles/etcd/templates/etcd.service.j2 @@ -0,0 +1,22 @@ +# {{ ansible_managed }} + +[Unit] +Description=ETCD Server +Documentation=https://coreos.com/etcd/docs/latest +After=docker.service +Wants=docker.socket +StartLimitIntervalSec=10 +StartLimitBurst=3 + +[Service] +ExecStart={{ docker_compose_path }} -f {{ etcd_compose_file }} up -d + +{% for item in groups['etcd'] %} +ExecStartPre=-{{ docker_path }}/docker rm 
-f {{ item }}
+{% endfor %}
+
+Restart=on-failure
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/etcd/templates/etcd.yml.j2 b/roles/etcd/templates/etcd.yml.j2
new file mode 100644
index 0000000..67f1899
--- /dev/null
+++ b/roles/etcd/templates/etcd.yml.j2
@@ -0,0 +1,24 @@
+# {{ ansible_managed }}
+---
+
+version: "3.6"
+
+services:
+  {{ ansible_hostname }}:
+    container_name: "{{ ansible_hostname }}"
+    image: "{{ etcd_image_repo }}:{{ etcd_version }}"
+    ports:
+      - 2379:2379
+      - 2380:2380
+    volumes:
+      - etcd-data:/etcd-data
+    environment:
+      ETCD_NAME: "{{ ansible_hostname }}"
+      ETCD_ADVERTISE_CLIENT_URLS: "http://{{ ansible_env.COREOS_PUBLIC_IPV4 }}:2379"
+      ETCD_INITIAL_ADVERTISE_PEER_URLS: "http://{{ ansible_env.COREOS_PUBLIC_IPV4 }}:2380"
+      ETCD_INITIAL_CLUSTER: "{{ ansible_hostname }}=http://{{ ansible_env.COREOS_PUBLIC_IPV4 }}:2380"
+      ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
+      ETCD_LISTEN_PEER_URLS: "http://0.0.0.0:2380"
+
+volumes:
+  etcd-data:
diff --git a/roles/kube-addons/defaults/main.yml b/roles/kube-addons/defaults/main.yml
new file mode 100644
index 0000000..27f4ef5
--- /dev/null
+++ b/roles/kube-addons/defaults/main.yml
@@ -0,0 +1,15 @@
+ansible_templates_dir: "{{ role_path }}/templates"
+kube_addons_dir: "{{ base_kube_dir }}/addons"
+
+# For CoreDNS
+image_arch: "{{ host_architecture | default('amd64') }}"
+coredns_image_repo: "gcr.io/google-containers/coredns"
+coredns_version: "{%- if image_arch != 'amd64' -%}1.1.3{%- else -%}1.2.2{%- endif -%}"
+coredns_image_tag: "{{ coredns_version }}{%- if image_arch != 'amd64' -%}__{{ image_arch }}_linux{%- endif -%}"
+
+coredns_replicas: 1
+coredns_ordinal_suffix: -primary
+
+dns_memory_limit: 170Mi
+dns_cpu_requests: 100m
+dns_memory_requests: 70Mi
diff --git a/roles/kube-addons/tasks/main.yml b/roles/kube-addons/tasks/main.yml
new file mode 100644
index 0000000..66c5421
--- /dev/null
+++ b/roles/kube-addons/tasks/main.yml
@@ -0,0 +1,71 @@
+---
+
+- name: Recursively copy templates
+  include: "{{ playbook_dir }}/macros/recursive_template_copy.yml"
+  with_items:
+    # Dashboard
+    - name: Add Kubernetes dashboard
+      src: "{{ ansible_templates_dir }}/dashboard"
+      dest: "{{ kube_addons_dir }}"
+    # CoreDNS
+    - name: Add CoreDNS
+      src: "{{ ansible_templates_dir }}/core-dns"
+      dest: "{{ kube_addons_dir }}"
+
+- name: Copy HAProxy addon-manifest
+  template:
+    src: "{{ ansible_templates_dir }}/haproxy/haproxy.yml.j2"
+    # One manifest per worker; a shared dest would be overwritten on each loop iteration
+    dest: "{{ kube_addons_dir }}/haproxy-{{ worker_id }}.yml"
+  vars:
+    worker_id: "{{ item | replace('kube-worker-', '') }}"
+  with_items: "{{ groups['kubernetes-workers'] }}"
+
+- name: "Create Kubernetes config-files directory"
+  file:
+    path: "{{ kube_config_dir }}"
+    state: directory
+
+- name: Copy HAProxy config-file
+  template:
+    src: "{{ ansible_templates_dir }}/configs/haproxy.cfg.j2"
+    dest: "{{ kube_config_dir }}/haproxy.cfg"
+
+- name: Apply Kubernetes addons
+  command: 'kubectl apply -f "{{ item.src }}"'
+  with_filetree: "{{ kube_addons_dir }}"
+  when: item.state == "file"
+  become: false
+
+- name: Wait for HAProxy to start
+  wait_for:
+    host: "{{ ansible_env.COREOS_PUBLIC_IPV4 }}"
+    port: "{{ haproxy_port }}"
+    delay: "5"
+    connect_timeout: "2"
+    timeout: "300"
+
+- name: Get system user-name
+  command: whoami
+  register: user_name
+  become: false
+
+- name: Get system user-home
+  shell: "getent passwd {{ user_name.stdout }} | cut -d: -f6"
+  register: user_home
+
+- name: Switch kubeconfigs to use reverse-proxy server
+  command: >
+    kubectl config --kubeconfig={{ item.config }} set-cluster {{
item.cluster }} + --server=https://{{ ansible_env.COREOS_PUBLIC_IPV4 }}:{{ haproxy_port }} + with_items: + - config: "{{ kube_config_dir }}/kubeconfig-kubelet.yaml" + cluster: local + - config: "{{ kube_config_dir }}/kubeconfig-proxy.yaml" + cluster: local + - config: "{{ user_home.stdout }}/.kube/config" + cluster: default-cluster + +- name: Reload Kubelet to use new reverse-proxy config + systemd: + name: kubelet + state: restarted diff --git a/roles/kube-addons/templates/configs/haproxy.cfg.j2 b/roles/kube-addons/templates/configs/haproxy.cfg.j2 new file mode 100644 index 0000000..2d9b0cf --- /dev/null +++ b/roles/kube-addons/templates/configs/haproxy.cfg.j2 @@ -0,0 +1,66 @@ +# {{ ansible_managed }} + +global + log /var/log/haproxy local0 + log /var/log/haproxy local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin + stats timeout 30s + user haproxy + group haproxy + # daemon + + # Default SSL material locations + ca-base /etc/ssl/certs + crt-base /etc/ssl/private + + # Default ciphers to use on SSL-enabled listening sockets. + # For more information, see ciphers(1SSL). This list is from: + # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ + ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS + ssl-default-bind-options no-sslv3 + +defaults + log global + mode tcp + + option tcplog + option dontlognull + option redispatch + + backlog 4096 + + timeout connect 5s + timeout client 50s + timeout queue 5s + timeout server 50s + + errorfile 400 /usr/local/etc/haproxy/errors/400.http + errorfile 403 /usr/local/etc/haproxy/errors/403.http + errorfile 408 /usr/local/etc/haproxy/errors/408.http + errorfile 500 /usr/local/etc/haproxy/errors/500.http + errorfile 502 /usr/local/etc/haproxy/errors/502.http + errorfile 503 /usr/local/etc/haproxy/errors/503.http + errorfile 504 /usr/local/etc/haproxy/errors/504.http + +frontend k8s-api + bind *:{{ haproxy_port }} + default_backend k8s-api + +backend k8s-api + mode tcp + option tcp-check + balance roundrobin + default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 +{% for item in groups['kubernetes-masters'] %} + server apiserver{{ loop.index }} {{ hostvars[item]['ansible_env']['COREOS_PUBLIC_IPV4'] }}:{{ apiserver_port }} check port {{ apiserver_port }} +{% endfor %} + +listen stats + mode http + bind *:8181 + stats enable + stats uri / + stats realm Haproxy\ Statistics + stats auth username:password + stats refresh 10s diff --git a/roles/kube-addons/templates/core-dns/coredns-clusterrole.yml.j2 b/roles/kube-addons/templates/core-dns/coredns-clusterrole.yml.j2 new file mode 100644 index 0000000..5611e4f --- /dev/null +++ b/roles/kube-addons/templates/core-dns/coredns-clusterrole.yml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + addonmanager.kubernetes.io/mode: Reconcile + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch diff --git a/roles/kube-addons/templates/core-dns/coredns-clusterrolebinding.yml.j2 b/roles/kube-addons/templates/core-dns/coredns-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..aa8c875 --- /dev/null +++ b/roles/kube-addons/templates/core-dns/coredns-clusterrolebinding.yml.j2 @@ -0,0 +1,20 
@@ +# {{ ansible_managed }} +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + addonmanager.kubernetes.io/mode: EnsureExists + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system diff --git a/roles/kube-addons/templates/core-dns/coredns-config.yml.j2 b/roles/kube-addons/templates/core-dns/coredns-config.yml.j2 new file mode 100644 index 0000000..6792682 --- /dev/null +++ b/roles/kube-addons/templates/core-dns/coredns-config.yml.j2 @@ -0,0 +1,24 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +data: + Corefile: | + .:53 { + errors + health + kubernetes {{ dns_domain }} in-addr.arpa ip6.arpa { + pods insecure + upstream /etc/resolv.conf + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + proxy . /etc/resolv.conf + cache 30 + } diff --git a/roles/kube-addons/templates/core-dns/coredns-deployment.yml.j2 b/roles/kube-addons/templates/core-dns/coredns-deployment.yml.j2 new file mode 100644 index 0000000..6cc6411 --- /dev/null +++ b/roles/kube-addons/templates/core-dns/coredns-deployment.yml.j2 @@ -0,0 +1,100 @@ +# {{ ansible_managed }} +--- + +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: coredns{{ coredns_ordinal_suffix | default('') }} + namespace: kube-system + labels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "CoreDNS" +spec: + replicas: {{ coredns_replicas }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 10% + selector: + matchLabels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + template: + metadata: + labels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: +{% if kubernetes_version is version_compare('v1.11.1', '>=') %} + priorityClassName: system-cluster-critical +{% endif %} + serviceAccountName: coredns + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: "CriticalAddonsOnly" + operator: "Exists" + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node-role.kubernetes.io/master + operator: In + values: + - "true" + containers: + - name: coredns + image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}" + imagePullPolicy: IfNotPresent + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. 
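+        # (The templated values below mirror this role's defaults, which in
+        # turn follow the stock upstream DNS addon: 170Mi limit, 100m/70Mi requests.)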
+ limits: + memory: {{ dns_memory_limit }} + requests: + cpu: {{ dns_cpu_requests }} + memory: {{ dns_memory_requests }} + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile diff --git a/roles/kube-addons/templates/core-dns/coredns-sa.yml.j2 b/roles/kube-addons/templates/core-dns/coredns-sa.yml.j2 new file mode 100644 index 0000000..0159ef6 --- /dev/null +++ b/roles/kube-addons/templates/core-dns/coredns-sa.yml.j2 @@ -0,0 +1,11 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile diff --git a/roles/kube-addons/templates/core-dns/coredns-svc.yml.j2 b/roles/kube-addons/templates/core-dns/coredns-svc.yml.j2 new file mode 100644 index 0000000..1037015 --- /dev/null +++ b/roles/kube-addons/templates/core-dns/coredns-svc.yml.j2 @@ -0,0 +1,31 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Service +metadata: + name: coredns{{ coredns_ordinal_suffix | default('') }} + namespace: kube-system + labels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "CoreDNS" + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "9153" + prometheus.io/scrape: "true" +spec: + selector: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + clusterIP: {{ dns_service_ip }} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP diff --git a/roles/kube-addons/templates/dashboard/dashboard-clusterrolebinding.yml.j2 b/roles/kube-addons/templates/dashboard/dashboard-clusterrolebinding.yml.j2 new file mode 100644 index 0000000..ffa9fb6 --- /dev/null +++ b/roles/kube-addons/templates/dashboard/dashboard-clusterrolebinding.yml.j2 @@ -0,0 +1,17 @@ +# {{ ansible_managed }} +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard + labels: + k8s-app: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system diff --git a/roles/kube-addons/templates/dashboard/dashboard-configmap.yaml.j2 b/roles/kube-addons/templates/dashboard/dashboard-configmap.yaml.j2 new file mode 100644 index 0000000..865b8e9 --- /dev/null +++ b/roles/kube-addons/templates/dashboard/dashboard-configmap.yaml.j2 @@ -0,0 +1,12 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + k8s-app: kubernetes-dashboard + # Allows editing resource and makes sure it is created first. 
+ addonmanager.kubernetes.io/mode: EnsureExists + name: kubernetes-dashboard-settings + namespace: kube-system diff --git a/roles/kube-addons/templates/dashboard/dashboard-controller.yaml.j2 b/roles/kube-addons/templates/dashboard/dashboard-controller.yaml.j2 new file mode 100644 index 0000000..68eef3c --- /dev/null +++ b/roles/kube-addons/templates/dashboard/dashboard-controller.yaml.j2 @@ -0,0 +1,74 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + addonmanager.kubernetes.io/mode: Reconcile + name: kubernetes-dashboard + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubernetes-dashboard + namespace: kube-system + labels: + k8s-app: kubernetes-dashboard + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + seccomp.security.alpha.kubernetes.io/pod: "docker/default" + spec: + priorityClassName: system-cluster-critical + containers: + - name: kubernetes-dashboard + image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3 + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 50m + memory: 100Mi + ports: + - containerPort: 8443 + protocol: TCP + args: + # PLATFORM-SPECIFIC ARGS HERE + - --auto-generate-certificates + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + - name: tmp-volume + mountPath: /tmp + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" diff --git a/roles/kube-addons/templates/dashboard/dashboard-secret.yaml.j2 b/roles/kube-addons/templates/dashboard/dashboard-secret.yaml.j2 new file mode 100644 index 0000000..931d6f3 --- /dev/null +++ b/roles/kube-addons/templates/dashboard/dashboard-secret.yaml.j2 @@ -0,0 +1,13 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + # Allows editing resource and makes sure it is created first. 
+  addonmanager.kubernetes.io/mode: EnsureExists
+  name: kubernetes-dashboard-certs
+  namespace: kube-system
+type: Opaque
diff --git a/roles/kube-addons/templates/dashboard/dashboard-service.yaml.j2 b/roles/kube-addons/templates/dashboard/dashboard-service.yaml.j2
new file mode 100644
index 0000000..16d22f2
--- /dev/null
+++ b/roles/kube-addons/templates/dashboard/dashboard-service.yaml.j2
@@ -0,0 +1,18 @@
+# {{ ansible_managed }}
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: kubernetes-dashboard
+  namespace: kube-system
+  labels:
+    k8s-app: kubernetes-dashboard
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  selector:
+    k8s-app: kubernetes-dashboard
+  ports:
+  - port: 443
+    targetPort: 8443
diff --git a/roles/kube-addons/templates/haproxy/haproxy.yml.j2 b/roles/kube-addons/templates/haproxy/haproxy.yml.j2
new file mode 100644
index 0000000..e650ac1
--- /dev/null
+++ b/roles/kube-addons/templates/haproxy/haproxy.yml.j2
@@ -0,0 +1,40 @@
+# {{ ansible_managed }}
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: haproxy-{{ worker_id }}
+  namespace: kube-system
+  labels:
+    k8s-app: haproxy-{{ worker_id }}
+    scheduler.alpha.kubernetes.io/critical-pod: ""
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      k8s-app: haproxy-{{ worker_id }}
+  template:
+    metadata:
+      labels:
+        k8s-app: haproxy-{{ worker_id }}
+    spec:
+      hostNetwork: true
+      containers:
+      - name: haproxy-{{ worker_id }}
+        image: jaskaranbir/haproxy:1.0.0
+        env:
+        - name: HAPROXY_CONFIG
+          value: "{{ kube_config_dir }}/haproxy.cfg"
+        - name: HAPROXY_ARGS
+          value: "-L haproxy-{{ worker_id }}"
+        volumeMounts:
+        - name: config-file
+          mountPath: "{{ kube_config_dir }}/haproxy.cfg"
+          readOnly: true
+      volumes:
+      - name: config-file
+        hostPath:
+          path: "{{ kube_config_dir }}/haproxy.cfg"
diff --git a/roles/kube-master/defaults/main.yml b/roles/kube-master/defaults/main.yml
new file mode 100644
index 0000000..e007ec0
--- /dev/null
+++ b/roles/kube-master/defaults/main.yml
@@ -0,0 +1,50 @@
+---
+# defaults file for roles/kube-master
+
+hyperkube_image: "{{ hyperkube_image_repo }}:{{ kubernetes_version }}"
+
+ansible_templates_dir: "{{ role_path }}/templates"
+network_plugin: "cni"
+docker_daemon_graph: "/var/lib/docker"
+kubelet_compose_file: "{{ compose_manifest_dir }}/kubelet.yml"
+
+etcd_endpoints: |
+  {%- for item in groups['etcd'] -%}
+  http://{{ hostvars[item]['ansible_env']['COREOS_PUBLIC_IPV4'] }}:2379
+  {%- if not loop.last -%}
+  ,
+  {%- endif -%}
+  {%- endfor -%}
+
+# Resource requests/limits are defined but not actually applied
+# Optimize the values as per your setup and re-enable them
+# (uncomment the respective blocks in the Kubernetes components)
+# Abrupt values might cause timeouts (especially TLS handshake timeouts)
+
+kube_apiserver_requests_cpu: "200m"
+kube_apiserver_requests_memory: "512Mi"
+kube_apiserver_limits_cpu: "200m" # requests and limits should be the same to get Guaranteed QoS
+kube_apiserver_limits_memory: "512Mi"
+
+kube_controller_manager_requests_cpu: "300m"
+kube_controller_manager_requests_memory: "768Mi"
+kube_controller_manager_limits_cpu: "300m" # requests and limits should be the same to get Guaranteed QoS
+kube_controller_manager_limits_memory: "768Mi"
+kube_controller_manager_kubeconfig: "{{ kube_config_dir }}/kubeconfig-controller-manager.yaml"
+
+kube_kubelet_config: "{{ kube_config_dir }}/config-kubelet.yaml"
+kube_kubelet_kubeconfig: "{{ kube_config_dir }}/kubeconfig-kubelet.yaml"
}}/kubeconfig-kubelet.yaml" + +kube_proxy_requests_cpu: "200m" +kube_proxy_requests_memory: "128Mi" +kube_proxy_limits_cpu: "200m" # requests and limits should be same to get Guaranteed QoS +kube_proxy_limits_memory: "128Mi" +kube_proxy_config: "{{ kube_config_dir }}/config-proxy.yaml" +kube_proxy_kubeconfig: "{{ kube_config_dir }}/kubeconfig-proxy.yaml" + +kube_scheduler_requests_cpu: "100m" +kube_scheduler_requests_memory: "256Mi" +kube_scheduler_limits_cpu: "100m" # requests and limits should be same to get Guaranteed QoS +kube_scheduler_limits_memory: "256Mi" +kube_scheduler_config: "{{ kube_config_dir }}/config-scheduler.yaml" +kube_scheduler_kubeconfig: "{{ kube_config_dir }}/kubeconfig-scheduler.yaml" diff --git a/roles/kube-master/tasks/main.yml b/roles/kube-master/tasks/main.yml new file mode 100644 index 0000000..cb3e8b0 --- /dev/null +++ b/roles/kube-master/tasks/main.yml @@ -0,0 +1,41 @@ +# tasks file for roles +--- + +- name: Recursively copy templates + include: "{{ playbook_dir }}/macros/recursive_template_copy.yml" + with_items: + # Configs + - name: Add Kubernetes configs + src : "{{ ansible_templates_dir }}/configs" + dest: "{{ kube_config_dir }}" + +- name: Non-recursively copy templates + include: "{{ playbook_dir }}/macros/template_copy.yml" + with_items: + # Docker-Compose manifests + - name: Add Docker-Compose manifests + src : "{{ ansible_templates_dir }}/manifests/docker-compose" + dest: "{{ compose_manifest_dir }}" + # Kubernetes Manifests + - name: Add Kubernetes manifests + src : "{{ ansible_templates_dir }}/manifests/kubernetes" + dest: "{{ kube_manifest_dir }}" + # Systemd Services + - name: Add component service-files + src : "{{ ansible_templates_dir }}/services" + dest: "/etc/systemd/system" + +- name: Start Kubelet + systemd: + name: kubelet + state: started + enabled: true + daemon_reload: yes + +- name: Wait for apiserver to start + wait_for: + host: "127.0.0.1" + port: "8080" + delay: "5" + connect_timeout: "2" + timeout: "1800" #For initial Docker-Image downloads/setups diff --git a/roles/kube-master/templates/configs/config-kubelet.yaml.j2 b/roles/kube-master/templates/configs/config-kubelet.yaml.j2 new file mode 100644 index 0000000..d4425b5 --- /dev/null +++ b/roles/kube-master/templates/configs/config-kubelet.yaml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +address: {{ ansible_env.COREOS_PUBLIC_IPV4 }} +staticPodPath: {{ kube_manifest_dir }} +clusterDNS: + - {{ dns_service_ip }} +clusterDomain: "{{ dns_domain }}" +authentication: + anonymous: + enabled: true + webhook: + enabled: false + x509: + clientCAFile: {{ kube_pki_dir }}/ca.pem +authorization: + mode: AlwaysAllow +tlsCertFile: "{{ kube_pki_dir }}/apiserver.pem" +tlsPrivateKeyFile: "{{ kube_pki_dir }}/apiserver-key.pem" diff --git a/roles/kube-master/templates/configs/config-proxy.yaml.j2 b/roles/kube-master/templates/configs/config-proxy.yaml.j2 new file mode 100644 index 0000000..4be584d --- /dev/null +++ b/roles/kube-master/templates/configs/config-proxy.yaml.j2 @@ -0,0 +1,12 @@ +# {{ ansible_managed }} +--- + +kind: KubeProxyConfiguration +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +clusterCIDR: {{ kube_pod_network }} +hostnameOverride: {{ ansible_hostname }} +bindAddress: {{ ansible_env.COREOS_PUBLIC_IPV4 }} +mode: iptables +healthzBindAddress: 127.0.0.1 +clientConnection: + kubeconfig: "{{ kube_proxy_kubeconfig }}" diff --git a/roles/kube-master/templates/configs/config-scheduler.yaml.j2 
b/roles/kube-master/templates/configs/config-scheduler.yaml.j2 new file mode 100644 index 0000000..b95cc6f --- /dev/null +++ b/roles/kube-master/templates/configs/config-scheduler.yaml.j2 @@ -0,0 +1,9 @@ +# {{ ansible_managed }} +--- + +apiVersion: kubescheduler.config.k8s.io/v1alpha1 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "{{ kube_scheduler_kubeconfig }}" +leaderElection: + leaderElect: true diff --git a/roles/kube-master/templates/configs/kubeconfig-controller-manager.yaml.j2 b/roles/kube-master/templates/configs/kubeconfig-controller-manager.yaml.j2 new file mode 100644 index 0000000..f2745c7 --- /dev/null +++ b/roles/kube-master/templates/configs/kubeconfig-controller-manager.yaml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: "{{ kube_pki_dir }}/ca.pem" + server: "https://{{ ansible_env.COREOS_PUBLIC_IPV4 }}:{{ apiserver_port }}" +users: +- name: controller-manager + user: + client-certificate: "{{ kube_pki_dir }}/apiserver.pem" + client-key: "{{ kube_pki_dir }}/apiserver-key.pem" +contexts: +- context: + cluster: local + user: controller-manager + name: controller-manager-context +current-context: controller-manager-context diff --git a/roles/kube-master/templates/configs/kubeconfig-kubelet.yaml.j2 b/roles/kube-master/templates/configs/kubeconfig-kubelet.yaml.j2 new file mode 100644 index 0000000..f2745c7 --- /dev/null +++ b/roles/kube-master/templates/configs/kubeconfig-kubelet.yaml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: "{{ kube_pki_dir }}/ca.pem" + server: "https://{{ ansible_env.COREOS_PUBLIC_IPV4 }}:{{ apiserver_port }}" +users: +- name: kubelet + user: + client-certificate: "{{ kube_pki_dir }}/apiserver.pem" + client-key: "{{ kube_pki_dir }}/apiserver-key.pem" +contexts: +- context: + cluster: local + user: kubelet + name: kubelet-context +current-context: kubelet-context diff --git a/roles/kube-master/templates/configs/kubeconfig-proxy.yaml.j2 b/roles/kube-master/templates/configs/kubeconfig-proxy.yaml.j2 new file mode 100644 index 0000000..f5dd76c --- /dev/null +++ b/roles/kube-master/templates/configs/kubeconfig-proxy.yaml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: "{{ kube_pki_dir }}/ca.pem" + server: "https://{{ ansible_env.COREOS_PUBLIC_IPV4 }}:{{ apiserver_port }}" +users: +- name: kube-proxy + user: + client-certificate: "{{ kube_pki_dir }}/apiserver.pem" + client-key: "{{ kube_pki_dir }}/apiserver-key.pem" +contexts: +- context: + cluster: local + user: kube-proxy + name: kube-proxy-default +current-context: kube-proxy-default diff --git a/roles/kube-master/templates/configs/kubeconfig-scheduler.yaml.j2 b/roles/kube-master/templates/configs/kubeconfig-scheduler.yaml.j2 new file mode 100644 index 0000000..32e25b8 --- /dev/null +++ b/roles/kube-master/templates/configs/kubeconfig-scheduler.yaml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: "{{ kube_pki_dir }}/ca.pem" + server: "https://{{ ansible_env.COREOS_PUBLIC_IPV4 }}:{{ apiserver_port }}" +users: +- name: kube-scheduler + user: + client-certificate: "{{ kube_pki_dir }}/apiserver.pem" + client-key: "{{ kube_pki_dir }}/apiserver-key.pem" +contexts: +- context: + cluster: local + user: kube-scheduler + name: kube-scheduler-context +current-context:
kube-scheduler-context diff --git a/roles/kube-master/templates/manifests/docker-compose/kubelet.yml.j2 b/roles/kube-master/templates/manifests/docker-compose/kubelet.yml.j2 new file mode 100644 index 0000000..bbd162c --- /dev/null +++ b/roles/kube-master/templates/manifests/docker-compose/kubelet.yml.j2 @@ -0,0 +1,41 @@ +# {{ ansible_managed }} +--- + +version: "3.6" + +services: + kubelet: + container_name: kubelet + image: "{{ hyperkube_image }}" + network_mode: host + pid: host + privileged: true + restart: on-failure:5 + command: > + ./hyperkube kubelet + --cni-conf-dir=/etc/cni/net.d + --config={{ kube_kubelet_config }} + --container-runtime=docker + --hostname-override={{ ansible_env.COREOS_PUBLIC_IPV4 }} + --kubeconfig={{ kube_kubelet_kubeconfig }} + --network-plugin={{ network_plugin }} + --node-labels=master,node-role.kubernetes.io/master="" + --register-with-taints=node-role.kubernetes.io/master=true:NoSchedule + volumes: + # RW volumes + - "{{ docker_daemon_graph }}:{{ docker_daemon_graph }}:rw" + - /dev:/dev:rw + - /etc/cni:/etc/cni:rw + - /etc/resolv.conf:/etc/resolv.conf:rw + - /usr/share/ca-certificates:/usr/share/ca-certificates:rw + - /var/log:/var/log:rw + - /var/run:/var/run:rw + # RO volumes + - "{{ base_kube_dir }}:{{ base_kube_dir }}:ro" + - /etc/os-release:/etc/os-release:ro + - /etc/ssl:/etc/ssl:ro + - /opt/cni:/opt/cni:ro + - /sys:/sys:ro + # Shared volumes + - /var/lib/kubelet:/var/lib/kubelet:shared + - /var/lib/cni:/var/lib/cni:shared diff --git a/roles/kube-master/templates/manifests/kubernetes/kube-apiserver.yaml.j2 b/roles/kube-master/templates/manifests/kubernetes/kube-apiserver.yaml.j2 new file mode 100644 index 0000000..72119e6 --- /dev/null +++ b/roles/kube-master/templates/manifests/kubernetes/kube-apiserver.yaml.j2 @@ -0,0 +1,90 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Pod +metadata: + name: kube-apiserver + namespace: kube-system + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + seccomp.security.alpha.kubernetes.io/pod: docker/default + labels: + tier: control-plane + component: kube-apiserver +spec: + hostNetwork: true + containers: + - name: kube-apiserver + image: "{{ hyperkube_image }}" + command: + - /hyperkube + - apiserver + - --advertise-address={{ ansible_env.COREOS_PUBLIC_IPV4 }} + - --allow-privileged=true + - --apiserver-count={{ groups['kubernetes-masters'] | length }} + - --authorization-mode=RBAC + - --bind-address={{ ansible_env.COREOS_PUBLIC_IPV4 }} + - --client-ca-file={{ kube_pki_dir }}/ca.pem + - --enable-admission-plugins=DefaultStorageClass,LimitRanger,NamespaceLifecycle,NodeRestriction,ResourceQuota,ServiceAccount + - --etcd-servers={{ etcd_endpoints }} + - --kubelet-client-certificate={{ kube_pki_dir }}/apiserver.pem + - --kubelet-client-key={{ kube_pki_dir }}/apiserver-key.pem + - --runtime-config=extensions/v1beta1=true,extensions/v1beta1/networkpolicies=true + - --secure-port={{ apiserver_port }} + - --service-account-key-file={{ kube_pki_dir }}/apiserver-key.pem + - --service-cluster-ip-range={{ service_ip_range }} + - --tls-cert-file={{ kube_pki_dir }}/apiserver.pem + - --tls-private-key-file={{ kube_pki_dir }}/apiserver-key.pem + # Disable hostNetwork before using hostPort + # ports: + # - containerPort: {{ apiserver_port }} + # hostPort: {{ apiserver_port }} + # name: https + # - containerPort: 8080 + # hostPort: 8080 + # name: local + # resources: + # requests: + # cpu: {{ kube_apiserver_requests_cpu }} + # memory: {{ kube_apiserver_requests_memory }} + # limits: + # cpu: {{
kube_apiserver_limits_cpu }} + # memory: {{ kube_apiserver_limits_memory }} + livenessProbe: + httpGet: + scheme: HTTP + host: 127.0.0.1 + port: 8080 + path: /healthz + # ApiServer can take its time starting up + initialDelaySeconds: 90 + timeoutSeconds: 15 + volumeMounts: + - name: ssl-certs-kubernetes + mountPath: "{{ kube_pki_dir }}" + readOnly: true + - name: ssl-certs-host + mountPath: /etc/ssl/certs + readOnly: true + - name: logfile + mountPath: /var/log/kube-apiserver.log + readOnly: false + - name: auditlogfile + mountPath: /var/log/kube-apiserver-audit.log + readOnly: false + volumes: + - name: ssl-certs-kubernetes + hostPath: + path: "{{ kube_pki_dir }}" + - name: ssl-certs-host + hostPath: + path: /usr/share/ca-certificates + - name: logfile + hostPath: + path: /var/log/kube-apiserver.log + type: FileOrCreate + - name: auditlogfile + hostPath: + path: /var/log/kube-apiserver-audit.log + type: FileOrCreate diff --git a/roles/kube-master/templates/manifests/kubernetes/kube-controller-manager.yaml.j2 b/roles/kube-master/templates/manifests/kubernetes/kube-controller-manager.yaml.j2 new file mode 100644 index 0000000..ed5dc2c --- /dev/null +++ b/roles/kube-master/templates/manifests/kubernetes/kube-controller-manager.yaml.j2 @@ -0,0 +1,70 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager + namespace: kube-system + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + seccomp.security.alpha.kubernetes.io/pod: docker/default + labels: + tier: control-plane + component: kube-controller-manager +spec: + hostNetwork: true + containers: + - name: kube-controller-manager + image: "{{ hyperkube_image }}" + command: + - /hyperkube + - controller-manager + - --cluster-signing-cert-file={{ kube_pki_dir }}/apiserver.pem + - --cluster-signing-key-file={{ kube_pki_dir }}/apiserver-key.pem + - --kubeconfig={{ kube_controller_manager_kubeconfig }} + - --leader-elect=true + - --root-ca-file={{ kube_pki_dir }}/ca.pem + - --service-account-private-key-file={{ kube_pki_dir }}/apiserver-key.pem + # resources: + # requests: + # cpu: {{ kube_controller_manager_requests_cpu }} + # memory: {{ kube_controller_manager_requests_memory }} + # limits: + # cpu: {{ kube_controller_manager_limits_cpu }} + # memory: {{ kube_controller_manager_limits_memory }} + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + initialDelaySeconds: 15 + timeoutSeconds: 1 + volumeMounts: + - name: ssl-certs-kubernetes + mountPath: "{{ kube_pki_dir }}" + readOnly: true + - name: ssl-certs-host + mountPath: /etc/ssl/certs + readOnly: true + - name: logfile + mountPath: /var/log/kube-controller-manager.log + readOnly: false + - name: kubeconfig + mountPath: "{{ kube_controller_manager_kubeconfig }}" + readOnly: true + volumes: + - name: ssl-certs-kubernetes + hostPath: + path: "{{ kube_pki_dir }}" + - name: ssl-certs-host + hostPath: + path: /usr/share/ca-certificates + - name: logfile + hostPath: + path: /var/log/kube-controller-manager.log + type: FileOrCreate + - name: kubeconfig + hostPath: + path: "{{ kube_controller_manager_kubeconfig }}" + type: File diff --git a/roles/kube-master/templates/manifests/kubernetes/kube-proxy.yaml.j2 b/roles/kube-master/templates/manifests/kubernetes/kube-proxy.yaml.j2 new file mode 100644 index 0000000..6bd3341 --- /dev/null +++ b/roles/kube-master/templates/manifests/kubernetes/kube-proxy.yaml.j2 @@ -0,0 +1,76 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Pod +metadata: + name: kube-proxy + 
namespace: kube-system + # This annotation ensures that kube-proxy does not get evicted if the node + # supports critical pod annotation based priority scheme. + # Note that kube-proxy runs as a static pod so this annotation does NOT have + # any effect on rescheduler (default scheduler and rescheduler are not + # involved in scheduling kube-proxy). + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + tier: node + component: kube-proxy +spec: + priorityClassName: system-node-critical + hostNetwork: true + tolerations: + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + containers: + - name: kube-proxy + image: "{{ hyperkube_image }}" + command: + - /hyperkube + - proxy + - --config={{ kube_proxy_config }} + securityContext: + privileged: true + # resources: + # requests: + # cpu: {{ kube_proxy_requests_cpu }} + # memory: {{ kube_proxy_requests_memory }} + # limits: + # cpu: {{ kube_proxy_limits_cpu }} + # memory: {{ kube_proxy_limits_memory }} + volumeMounts: + - name: config + mountPath: "{{ kube_proxy_config }}" + readOnly: true + - name: kubeconfig + mountPath: "{{ kube_proxy_kubeconfig }}" + readOnly: true + - name: coreos-modules + mountPath: /lib/modules + readOnly: true + - name: ssl-certs-kubernetes + mountPath: "{{ kube_pki_dir }}" + readOnly: true + - name: varlog + mountPath: /var/log + readOnly: false + volumes: + - name: config + hostPath: + path: "{{ kube_proxy_config }}" + type: File + - name: kubeconfig + hostPath: + path: "{{ kube_proxy_kubeconfig }}" + type: File + - name: coreos-modules + hostPath: + path: /lib/modules + - name: varlog + hostPath: + path: /var/log + - name: ssl-certs-kubernetes + hostPath: + path: "{{ kube_pki_dir }}" diff --git a/roles/kube-master/templates/manifests/kubernetes/kube-scheduler.yaml.j2 b/roles/kube-master/templates/manifests/kubernetes/kube-scheduler.yaml.j2 new file mode 100644 index 0000000..7e6fb82 --- /dev/null +++ b/roles/kube-master/templates/manifests/kubernetes/kube-scheduler.yaml.j2 @@ -0,0 +1,66 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler + namespace: kube-system + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + seccomp.security.alpha.kubernetes.io/pod: docker/default + labels: + tier: control-plane + component: kube-scheduler +spec: + hostNetwork: true + containers: + - name: kube-scheduler + image: "{{ hyperkube_image }}" + command: + - /hyperkube + - scheduler + - --config={{ kube_scheduler_config }} + # resources: + # requests: + # cpu: {{ kube_scheduler_requests_cpu }} + # memory: {{ kube_scheduler_requests_memory }} + # limits: + # cpu: {{ kube_scheduler_limits_cpu }} + # memory: {{ kube_scheduler_limits_memory }} + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10251 + initialDelaySeconds: 15 + timeoutSeconds: 15 + volumeMounts: + - name: config + mountPath: "{{ kube_scheduler_config }}" + readOnly: true + - name: kubeconfig + mountPath: "{{ kube_scheduler_kubeconfig }}" + readOnly: true + - name: logfile + mountPath: /var/log/kube-scheduler.log + readOnly: false + - name: ssl-certs-kubernetes + mountPath: "{{ kube_pki_dir }}" + readOnly: true + volumes: + - name: config + hostPath: + path: "{{ kube_scheduler_config }}" + type: File + - name: kubeconfig + hostPath: + path: "{{ kube_scheduler_kubeconfig }}" + type: File + - name: logfile + hostPath: + path: /var/log/kube-scheduler.log + type: FileOrCreate + - name: ssl-certs-kubernetes + hostPath: + path: "{{ kube_pki_dir 
}}" diff --git a/roles/kube-master/templates/services/kubelet.service.j2 b/roles/kube-master/templates/services/kubelet.service.j2 new file mode 100644 index 0000000..20aa882 --- /dev/null +++ b/roles/kube-master/templates/services/kubelet.service.j2 @@ -0,0 +1,18 @@ +# {{ ansible_managed }} + +[Unit] +Description=Kubernetes Kubelet Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=docker.service +Wants=docker.socket +StartLimitIntervalSec=10 +StartLimitBurst=3 + +[Service] +ExecStart={{ docker_compose_path }} -f {{ kubelet_compose_file }} up -d +ExecStartPre=-{{ docker_path }}/docker rm -f kubelet +Restart=on-failure +RestartSec=10s + +[Install] +WantedBy=multi-user.target diff --git a/roles/kube-worker/defaults/main.yml b/roles/kube-worker/defaults/main.yml new file mode 100644 index 0000000..ab8f1d0 --- /dev/null +++ b/roles/kube-worker/defaults/main.yml @@ -0,0 +1,33 @@ +--- +# defaults file for roles/ca + +hyperkube_image: "{{ hyperkube_image_repo }}:{{ kubernetes_version }}" + +ansible_templates_dir: "{{ role_path }}/templates" +network_plugin: "cni" +docker_daemon_graph: "/var/lib/docker" +kubelet_compose_file: "{{ compose_manifest_dir }}/kubelet.yml" + +# Resource requests/limits are defined but not actually applied +# Optimize the values as per your setup and renable them +# (uncomment respective blocks in kubernetes components) +# Abrupt values might cause timeouts (especially TLS handshake timeouts) + +kube_heapster_base_metrics_cpu: "100m" +kube_heapster_requests_cpu: "50m" +kube_heapster_requests_memory: "90Mi" +kube_heapster_limits_cpu: "50m" +kube_heapster_limits_memory: "90Mi" +kube_heapster_base_memory: "140Mi" +kube_heapster_cpu_per_node: "0.5m" +kube_heapster_memory_per_node: "4" + +kube_kubelet_config: "{{ kube_config_dir }}/config-kubelet.yaml" +kube_kubelet_kubeconfig: "{{ kube_config_dir }}/kubeconfig-kubelet.yaml" + +kube_proxy_requests_cpu: "200m" +kube_proxy_requests_memory: "128Mi" +kube_proxy_limits_cpu: "200m" # requests and limits should be same to get Guaranteed QoS +kube_proxy_limits_memory: "128Mi" +kube_proxy_config: "{{ kube_config_dir }}/config-proxy.yaml" +kube_proxy_kubeconfig: "{{ kube_config_dir }}/kubeconfig-proxy.yaml" diff --git a/roles/kube-worker/tasks/main.yml b/roles/kube-worker/tasks/main.yml new file mode 100644 index 0000000..1126874 --- /dev/null +++ b/roles/kube-worker/tasks/main.yml @@ -0,0 +1,41 @@ +# tasks file for roles +--- + +- name: Recursively copy templates + include: "{{ playbook_dir }}/macros/recursive_template_copy.yml" + with_items: + # Configs + - name: Add Kubernetes configs + src : "{{ ansible_templates_dir }}/configs" + dest: "{{ kube_config_dir }}" + # Addons + - name: Copy Kubernetes addons + src : "{{ ansible_templates_dir }}/addons" + dest: "{{ kube_addons_dir }}" + +- name: Non-recursively copy templates + include: "{{ playbook_dir }}/macros/template_copy.yml" + with_items: + # Docker-Compose manifests + - name: Add Docker-Compose manifests + src : "{{ ansible_templates_dir }}/manifests/docker-compose" + dest: "{{ compose_manifest_dir }}" + # Kubernetes Manifests + - name: Add Kubernetes manifests + src : "{{ ansible_templates_dir }}/manifests/kubernetes" + dest: "{{ kube_manifest_dir }}" + # Systemd Services + - name: Add component service-files + src : "{{ ansible_templates_dir }}/services" + dest: "/etc/systemd/system" + +- name: Start Kubelet + systemd: + name: kubelet + state: started + enabled: true + daemon_reload: yes + +- name: Wait 1-minute to allow kubelet to sync with master + 
wait_for: + timeout: 60 diff --git a/roles/kube-worker/templates/configs/config-kubelet.yaml.j2 b/roles/kube-worker/templates/configs/config-kubelet.yaml.j2 new file mode 100644 index 0000000..89a8a69 --- /dev/null +++ b/roles/kube-worker/templates/configs/config-kubelet.yaml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +address: {{ ansible_env.COREOS_PUBLIC_IPV4 }} +staticPodPath: {{ kube_manifest_dir }} +clusterDNS: + - {{ dns_service_ip }} +clusterDomain: "{{ dns_domain }}" +authentication: + anonymous: + enabled: true + webhook: + enabled: false + x509: + clientCAFile: {{ kube_pki_dir }}/ca.pem +authorization: + mode: AlwaysAllow +tlsCertFile: "{{ kube_pki_dir }}/worker.pem" +tlsPrivateKeyFile: "{{ kube_pki_dir }}/worker-key.pem" diff --git a/roles/kube-worker/templates/configs/config-proxy.yaml.j2 b/roles/kube-worker/templates/configs/config-proxy.yaml.j2 new file mode 100644 index 0000000..4be584d --- /dev/null +++ b/roles/kube-worker/templates/configs/config-proxy.yaml.j2 @@ -0,0 +1,12 @@ +# {{ ansible_managed }} +--- + +kind: KubeProxyConfiguration +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +clusterCIDR: {{ kube_pod_network }} +hostnameOverride: {{ ansible_hostname }} +bindAddress: {{ ansible_env.COREOS_PUBLIC_IPV4 }} +mode: iptables +healthzBindAddress: 127.0.0.1 +clientConnection: + kubeconfig: "{{ kube_proxy_kubeconfig }}" diff --git a/roles/kube-worker/templates/configs/kubeconfig-kubelet.yaml.j2 b/roles/kube-worker/templates/configs/kubeconfig-kubelet.yaml.j2 new file mode 100644 index 0000000..9fa4910 --- /dev/null +++ b/roles/kube-worker/templates/configs/kubeconfig-kubelet.yaml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: "{{ kube_pki_dir }}/ca.pem" + server: {{ init_apiserver_address }} +users: +- name: kubelet + user: + client-certificate: "{{ kube_pki_dir }}/worker.pem" + client-key: "{{ kube_pki_dir }}/worker-key.pem" +contexts: +- context: + cluster: local + user: kubelet + name: kubelet-context +current-context: kubelet-context diff --git a/roles/kube-worker/templates/configs/kubeconfig-proxy.yaml.j2 b/roles/kube-worker/templates/configs/kubeconfig-proxy.yaml.j2 new file mode 100644 index 0000000..d3e85f9 --- /dev/null +++ b/roles/kube-worker/templates/configs/kubeconfig-proxy.yaml.j2 @@ -0,0 +1,21 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: "{{ kube_pki_dir }}/ca.pem" + server: {{ init_apiserver_address }} +users: +- name: kube-proxy + user: + client-certificate: "{{ kube_pki_dir }}/worker.pem" + client-key: "{{ kube_pki_dir }}/worker-key.pem" +contexts: +- context: + cluster: local + user: kube-proxy + name: kube-proxy-default +current-context: kube-proxy-default diff --git a/roles/kube-worker/templates/manifests/docker-compose/kubelet.yml.j2 b/roles/kube-worker/templates/manifests/docker-compose/kubelet.yml.j2 new file mode 100644 index 0000000..3907d7d --- /dev/null +++ b/roles/kube-worker/templates/manifests/docker-compose/kubelet.yml.j2 @@ -0,0 +1,40 @@ +# {{ ansible_managed }} +--- + +version: "3.6" + +services: + kubelet: + container_name: kubelet + image: "{{ hyperkube_image }}" + network_mode: host + pid: host + privileged: true + restart: on-failure:5 + command: > + ./hyperkube kubelet + --cni-conf-dir=/etc/cni/net.d + --config={{ kube_kubelet_config }} + --container-runtime=docker + 
--hostname-override={{ ansible_env.COREOS_PUBLIC_IPV4 }} + --kubeconfig={{ kube_kubelet_kubeconfig }} + --network-plugin={{ network_plugin }} + --node-labels=worker,node-role.kubernetes.io/worker="" + volumes: + # RW volumes + - "{{ docker_daemon_graph }}:{{ docker_daemon_graph }}:rw" + - /dev:/dev:rw + - /etc/cni:/etc/cni:rw + - /etc/resolv.conf:/etc/resolv.conf:rw + - /usr/share/ca-certificates:/usr/share/ca-certificates:rw + - /var/log:/var/log:rw + - /var/run:/var/run:rw + # RO volumes + - "{{ base_kube_dir }}:{{ base_kube_dir }}:ro" + - /etc/os-release:/etc/os-release:ro + - /etc/ssl:/etc/ssl:ro + - /opt/cni:/opt/cni:ro + - /sys:/sys:ro + # Shared volumes + - /var/lib/kubelet:/var/lib/kubelet:shared + - /var/lib/cni:/var/lib/cni:shared diff --git a/roles/kube-worker/templates/manifests/kubernetes/kube-addon-manager.yaml.j2 b/roles/kube-worker/templates/manifests/kubernetes/kube-addon-manager.yaml.j2 new file mode 100644 index 0000000..0df6bdb --- /dev/null +++ b/roles/kube-worker/templates/manifests/kubernetes/kube-addon-manager.yaml.j2 @@ -0,0 +1,40 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Pod +metadata: + name: kube-addon-manager + namespace: kube-system + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + seccomp.security.alpha.kubernetes.io/pod: docker/default + labels: + component: kube-addon-manager +spec: + hostNetwork: true + containers: + - name: kube-addon-manager + image: k8s.gcr.io/kube-addon-manager:v8.6 + command: + - /bin/bash + - -c + - exec /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1 + resources: + requests: + cpu: 5m + memory: 50Mi + volumeMounts: + - mountPath: "{{ base_kube_dir }}" + name: addons + readOnly: true + - mountPath: /var/log + name: varlog + readOnly: false + volumes: + - name: addons + hostPath: + path: "{{ base_kube_dir }}" + - name: varlog + hostPath: + path: /var/log diff --git a/roles/kube-worker/templates/manifests/kubernetes/kube-proxy.yaml.j2 b/roles/kube-worker/templates/manifests/kubernetes/kube-proxy.yaml.j2 new file mode 100644 index 0000000..6bd3341 --- /dev/null +++ b/roles/kube-worker/templates/manifests/kubernetes/kube-proxy.yaml.j2 @@ -0,0 +1,76 @@ +# {{ ansible_managed }} +--- + +apiVersion: v1 +kind: Pod +metadata: + name: kube-proxy + namespace: kube-system + # This annotation ensures that kube-proxy does not get evicted if the node + # supports critical pod annotation based priority scheme. + # Note that kube-proxy runs as a static pod so this annotation does NOT have + # any effect on rescheduler (default scheduler and rescheduler are not + # involved in scheduling kube-proxy). 
+ annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + tier: node + component: kube-proxy +spec: + priorityClassName: system-node-critical + hostNetwork: true + tolerations: + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + containers: + - name: kube-proxy + image: "{{ hyperkube_image }}" + command: + - /hyperkube + - proxy + - --config={{ kube_proxy_config }} + securityContext: + privileged: true + # resources: + # requests: + # cpu: {{ kube_proxy_requests_cpu }} + # memory: {{ kube_proxy_requests_memory }} + # limits: + # cpu: {{ kube_proxy_limits_cpu }} + # memory: {{ kube_proxy_limits_memory }} + volumeMounts: + - name: config + mountPath: "{{ kube_proxy_config }}" + readOnly: true + - name: kubeconfig + mountPath: "{{ kube_proxy_kubeconfig }}" + readOnly: true + - name: coreos-modules + mountPath: /lib/modules + readOnly: true + - name: ssl-certs-kubernetes + mountPath: "{{ kube_pki_dir }}" + readOnly: true + - name: varlog + mountPath: /var/log + readOnly: false + volumes: + - name: config + hostPath: + path: "{{ kube_proxy_config }}" + type: File + - name: kubeconfig + hostPath: + path: "{{ kube_proxy_kubeconfig }}" + type: File + - name: coreos-modules + hostPath: + path: /lib/modules + - name: varlog + hostPath: + path: /var/log + - name: ssl-certs-kubernetes + hostPath: + path: "{{ kube_pki_dir }}" diff --git a/roles/kube-worker/templates/services/kubelet.service.j2 b/roles/kube-worker/templates/services/kubelet.service.j2 new file mode 100644 index 0000000..66f1928 --- /dev/null +++ b/roles/kube-worker/templates/services/kubelet.service.j2 @@ -0,0 +1,18 @@ +# {{ ansible_managed }} + +[Unit] +Description=Kubernetes Kubelet Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=docker.service +Wants=docker.socket +StartLimitIntervalSec=10 +StartLimitBurst=3 + +[Service] +ExecStart={{ docker_compose_path }} -f {{ kubelet_compose_file }} up -d +ExecStartPre=-{{ docker_path }}/docker rm -f kubelet +Restart=on-failure +RestartSec=10s + +[Install] +WantedBy=multi-user.target diff --git a/roles/kubectl/defaults/main.yml b/roles/kubectl/defaults/main.yml new file mode 100644 index 0000000..04d05ec --- /dev/null +++ b/roles/kubectl/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for roles/kubectl + +kubectl_checksum: a9e7f82e516aa8a652bc485620483ea964eb940787cabf890d7eab96eaac294d diff --git a/roles/kubectl/tasks/config-kubectl.yml b/roles/kubectl/tasks/config-kubectl.yml new file mode 100644 index 0000000..1a73860 --- /dev/null +++ b/roles/kubectl/tasks/config-kubectl.yml @@ -0,0 +1,66 @@ +--- +# tasks file for roles + +- name: Create Kubernetes SSL directory + file: + path: "{{ kube_pki_dir }}" + state: directory + +- name: Add admin certificates to the Kubernetes SSL directory + copy: + src: "{{ kube_pki_dir }}/{{ item }}" + dest: "{{ kube_pki_dir }}/{{ item }}" + mode: 0644 + owner: root + group: root + with_items: + - ca.pem + - admin.pem + - admin-key.pem + +- name: Set default cluster + command: > + kubectl config set-cluster default-cluster + --server={{ init_apiserver_address }} + --certificate-authority=ca.pem + args: + chdir: "{{ kube_pki_dir }}" + +- name: Set credentials + command: > + kubectl config set-credentials default-admin + --certificate-authority=ca.pem + --client-key=admin-key.pem + --client-certificate=admin.pem + args: + chdir: "{{ kube_pki_dir }}" + +- name: Set context + command: > + kubectl config set-context default-system + --cluster=default-cluster + --user=default-admin
+ args: + chdir: "{{ kube_pki_dir }}" + +- name: Use context + command: kubectl config use-context default-system + args: + chdir: "{{ kube_pki_dir }}" + +- name: Get system user-name + command: whoami + register: user_name + become: false + +- name: Get system user-home + shell: "getent passwd {{ user_name.stdout }} | cut -d: -f6" + register: user_home + +- name: Create local kube-config directory + file: + path: "{{ user_home.stdout }}/.kube" + state: directory + +- name: Copy kubectl config to user-home + shell: "kubectl config view > {{ user_home.stdout }}/.kube/config" diff --git a/roles/kubectl/tasks/install-kubectl.yml b/roles/kubectl/tasks/install-kubectl.yml new file mode 100644 index 0000000..db8eab4 --- /dev/null +++ b/roles/kubectl/tasks/install-kubectl.yml @@ -0,0 +1,21 @@ +--- +# tasks file for roles + +- name: Create kubectl binary directory + file: + path: "{{ binary_copy_path }}" + state: directory + +# Use curl here because Ansible's get_url fails SSL verification in this environment +- name: Download kubectl + command: | + /bin/curl --output {{ binary_copy_path }}/kubectl https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl + args: + warn: false + +- name: Set kubectl binary permissions + file: + path: "{{ binary_copy_path }}/kubectl" + owner: root + group: root + mode: 0755 diff --git a/roles/kubectl/tasks/main.yml b/roles/kubectl/tasks/main.yml new file mode 100644 index 0000000..41366ad --- /dev/null +++ b/roles/kubectl/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# tasks file for roles + +- name: Install or update kubectl + include: install-kubectl.yml + +- name: Configure kubectl + include: config-kubectl.yml
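
A note on the repeated "requests and limits should be same to get Guaranteed QoS" comments in the role defaults: Kubernetes assigns the Guaranteed QoS class only when every container in the pod sets CPU and memory limits equal to its requests. Re-enabling the commented-out resources block in, for example, the kube-proxy manifests with the defaults above would look like the sketch below (the values are the role defaults; tune them per setup, as the comments advise):

      resources:
        requests:
          cpu: 200m          # kube_proxy_requests_cpu
          memory: 128Mi      # kube_proxy_requests_memory
        limits:
          cpu: 200m          # equal to the request, required for Guaranteed QoS
          memory: 128Mi      # equal to the request, required for Guaranteed QoS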
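The master role waits for TCP port 8080 before continuing, but an open port does not guarantee the apiserver is actually serving requests. A hedged alternative (a sketch only, assuming the apiserver keeps exposing /healthz on the same insecure port 8080 the existing wait_for task checks) polls the health endpoint with Ansible's uri module instead:

- name: Wait for apiserver to report healthy
  uri:
    url: "http://127.0.0.1:8080/healthz"
    return_content: true
  register: apiserver_health
  # Never hard-fail an individual attempt; the until condition drives the retries
  failed_when: false
  until: apiserver_health.status == 200
  retries: 180   # 180 x 10s = 1800s, mirroring the existing wait_for timeout
  delay: 10

Because failed_when is disabled, a follow-up assert task would be needed to hard-fail the play if the final status is still not 200.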
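Similarly, the worker role ends with a fixed 60-second pause rather than checking that the node actually registered. A polling loop could replace the blind wait. This is a sketch only: it assumes kubectl is already configured on the first master (as the kubectl role does), that the node registers under its COREOS_PUBLIC_IPV4 address (matching the kubelet's --hostname-override above), and that the inventory group is named kubernetes-masters as in the apiserver manifest:

- name: Wait until this worker's node reports Ready
  command: >
    kubectl get node {{ ansible_env.COREOS_PUBLIC_IPV4 }}
    -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
  register: node_ready
  failed_when: false
  until: node_ready.stdout == "True"
  retries: 30
  delay: 10
  # Run the check from a master, where kubectl and the admin kubeconfig live
  delegate_to: "{{ groups['kubernetes-masters'][0] }}"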
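Finally, roles/kubectl/defaults/main.yml defines kubectl_checksum, but install-kubectl.yml downloads the binary with curl and never verifies it. If get_url's failure is indeed TLS certificate validation, a hedged alternative is to keep validation off but enforce the pinned checksum, so the otherwise-unused default actually does something (a sketch, not verified against this environment):

- name: Download kubectl (checksum-verified)
  get_url:
    url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
    dest: "{{ binary_copy_path }}/kubectl"
    checksum: "sha256:{{ kubectl_checksum }}"
    # Mirrors the curl workaround; drop once the SSL issue is resolved
    validate_certs: false
    owner: root
    group: root
    mode: 0755

Even with validate_certs disabled, the sha256 pin catches a corrupted or tampered download; note that the checksum is tied to a single kubernetes_version and must be updated together with it.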