From 4b39d9cc695f827ebb8d24116bb25295fce2474e Mon Sep 17 00:00:00 2001
From: Tarun Prakash
Date: Tue, 9 Jul 2019 13:15:49 +0530
Subject: [PATCH] updated README and removed frankfurt specific key word from all the files

---
 README.md                                     | 18 +-
 aws-vpn-gtw.tf                                | 6 +-
 backend.tf                                    | 12 -
 .../cluster-autoscalar/ca-iam-policy.tf       | 59 ----
 cluster-addons/cluster-autoscalar/eks-ca.yaml | 154 --------
 .../ingress-controller/private-ingress.yaml   | 333 ------------------
 .../ingress-controller/public-ingress.yaml    | 332 -----------------
 cluster-addons/kube2iam/kube2iam.yaml         | 76 ----
 .../aggregated-metrics-reader.yaml            | 12 -
 .../metrics-server/auth-delegator.yaml        | 13 -
 .../metrics-server/auth-reader.yaml           | 14 -
 .../metrics-server/metrics-apiservice.yaml    | 14 -
 .../metrics-server-deployment.yaml            | 42 ---
 .../metrics-server-service.yaml               | 15 -
 .../metrics-server/resource-reader.yaml       | 29 --
 eks-cluster.tf                                | 42 +--
 eks-worker-node.tf                            | 54 +--
 frankfurt-eks-vpc.tf                          | 50 +--
 iam.tf                                        | 26 +-
 outputs.tf                                    | 6 +-
 private-route.tf                              | 4 +-
 providers.tf                                  | 2 +-
 variables.tf                                  | 7 +-
 23 files changed, 113 insertions(+), 1207 deletions(-)
 delete mode 100644 backend.tf
 delete mode 100755 cluster-addons/cluster-autoscalar/ca-iam-policy.tf
 delete mode 100644 cluster-addons/cluster-autoscalar/eks-ca.yaml
 delete mode 100755 cluster-addons/ingress-controller/private-ingress.yaml
 delete mode 100755 cluster-addons/ingress-controller/public-ingress.yaml
 delete mode 100644 cluster-addons/kube2iam/kube2iam.yaml
 delete mode 100755 cluster-addons/metrics-server/aggregated-metrics-reader.yaml
 delete mode 100755 cluster-addons/metrics-server/auth-delegator.yaml
 delete mode 100755 cluster-addons/metrics-server/auth-reader.yaml
 delete mode 100755 cluster-addons/metrics-server/metrics-apiservice.yaml
 delete mode 100755 cluster-addons/metrics-server/metrics-server-deployment.yaml
 delete mode 100755 cluster-addons/metrics-server/metrics-server-service.yaml
 delete mode 100755 cluster-addons/metrics-server/resource-reader.yaml

diff --git a/README.md b/README.md
index 9203f30..e1955f1 100644
--- a/README.md
+++ b/README.md
@@ -8,8 +8,8 @@ This repository showcases the terraform template that will help you to create EK
 **Note**
 - Above architecture doesn't reflect all the components that are created by this template. However, it does give an idea about core infrastructure that will be created.
-- Creates a new VPC with CIDR Block - 10.15.0.0/19 (i.e 8190 IPs in a VPC) in Frankfurt region. Feel free to change it, values are `variables.tf`.
-- Creates 3 public & 3 private subnets with each size of 1024 IP addresses in each zones (eu-central-1a, eu-central-1b and eu-central-1c
+- Creates a new VPC with CIDR Block - 10.15.0.0/19 (i.e. 8190 IPs in a VPC) in a region of your choice. Feel free to change it; the values are in `variables.tf`.
+- Creates 3 public & 3 private subnets with 1024 IP addresses each, one of each per availability zone.
 - Creates security groups required for cluster and worker nodes.
 - Creates recommended IAM service and EC2 roles required for EKS cluster.
 - Creates Internet & NAT Gateway required for public and private communications.
@@ -42,16 +42,22 @@ This will ask you to specify `cluster name` and worker node instance type.
``` $ terraform plan + var.cluster-name - Enter eks cluster name - example like eks-frankfurt + Enter eks cluster name - example like eks-demo, eks-dev etc + + Enter a value: eks-demo - Enter a value: eks-frankfurt +var.region + Enter region you want to create EKS cluster in + + Enter a value: us-east-1 var.ssh_key_pair Enter SSH keypair name that already exist in the account - Enter a value: somename - + Enter a value: eks-keypair + var.worker-node-instance_type enter worker node instance type diff --git a/aws-vpn-gtw.tf b/aws-vpn-gtw.tf index 008ec7c..4723f39 100755 --- a/aws-vpn-gtw.tf +++ b/aws-vpn-gtw.tf @@ -1,10 +1,10 @@ -# create aws vpn gateway for EKS VPC Frankfurt +# create aws vpn gateway for EKS VPC resource "aws_vpn_gateway" "vpn_gw" { - vpc_id = "${aws_vpc.frankfurt.id}" + vpc_id = "${aws_vpc.eks.id}" tags = "${ map( - "Name", "eks aws vpn gateway frankfurt" + "Name", "eks aws vpn gateway" ) }" } diff --git a/backend.tf b/backend.tf deleted file mode 100644 index 7c08188..0000000 --- a/backend.tf +++ /dev/null @@ -1,12 +0,0 @@ -## A "backend" in Terraform determines how state is loaded. Its completely optional but recommended. -## Terraform remote state management - visit https://www.terraform.io/docs/backends/index.html -## eks-frankfurt is the folder inside the bucket that you are going to choose to store terraform state files. -## make sure you create it in advance. - -#terraform { -# backend "s3" { -# bucket = "{var.s3_bucket_name}" -# key = "eks-frankfurt/terraform.tfstate" -# region = "us-east-1" -# } -#} diff --git a/cluster-addons/cluster-autoscalar/ca-iam-policy.tf b/cluster-addons/cluster-autoscalar/ca-iam-policy.tf deleted file mode 100755 index 9018940..0000000 --- a/cluster-addons/cluster-autoscalar/ca-iam-policy.tf +++ /dev/null @@ -1,59 +0,0 @@ -resource "aws_iam_role_policy" "frankfurt_ca_policy" { - name = "frankfurt_ca_policy" - role = "${aws_iam_role.frankfurt_ca_role.id}" - - policy = <-" - # Here: "-" - # This has to be adapted if you change either parameter - # when launching the nginx-ingress-controller. - - "ingress-controller-leader-nginx" - verbs: - - get - - update - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: nginx-eksprivate-role-nisa-binding - namespace: ingress-eksprivate-nginx -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-eksprivate-role -subjects: - - kind: ServiceAccount - name: nginx-eksprivate-serviceaccount - namespace: ingress-eksprivate-nginx - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: nginx-eksprivate-clusterrole-nisa-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-eksprivate-clusterrole -subjects: - - kind: ServiceAccount - name: nginx-eksprivate-serviceaccount - namespace: ingress-eksprivate-nginx ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: default-eks-http-backend - labels: - app: default-eks-http-backend - namespace: ingress-eksprivate-nginx -spec: - replicas: 2 - selector: - matchLabels: - app: default-eks-http-backend - template: - metadata: - labels: - app: default-eks-http-backend - spec: - terminationGracePeriodSeconds: 60 - containers: - - name: default-eks-http-backend - # Any image is permissible as long as: - # 1. It serves a 404 page at / - # 2. 
It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.4 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi ---- - -apiVersion: v1 -kind: Service -metadata: - name: default-eks-http-backend - namespace: ingress-eksprivate-nginx - labels: - app: default-eks-http-backend -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: default-eks-http-backend - ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nginx-eksprivate-controller - namespace: ingress-eksprivate-nginx -spec: - replicas: 2 - selector: - matchLabels: - app: ingress-eksprivate-nginx - template: - metadata: - labels: - app: ingress-eksprivate-nginx - annotations: - prometheus.io/port: '10254' - prometheus.io/scrape: 'true' - spec: - serviceAccountName: nginx-eksprivate-serviceaccount - containers: - - name: nginx-ingress-controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.17.1 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-eks-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-configuration - - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services - - --udp-services-configmap=$(POD_NAMESPACE)/udp-services - - --publish-service=$(POD_NAMESPACE)/ingress-eksprivate-nginx - - --annotations-prefix=nginx.ingress.kubernetes.io - - --ingress-class=private-nginx - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - securityContext: - runAsNonRoot: false ---- -kind: Service -apiVersion: v1 -metadata: - name: ingress-eksprivate-nginx - namespace: ingress-eksprivate-nginx - labels: - app: ingress-eksprivate-nginx - annotations: - # Specifies whether cross-zone load balancing is enabled for the load balancer - service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" - # Expose this as internal load balancer - service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0" - # replace with the correct value of the generated certificate in the AWS console Mediaiqdigital.com - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:eu-central-1:230367374156:certificate/d5ea0711-b37c-49de-a407-f5857f09d229" - # the backend instances are HTTP - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - # Map port 443 - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" - # Increase the ELB idle timeout to avoid issues with WebSockets or Server-Sent Events. 
- service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' - # tags for ELB - service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "Name=frankfurt-private-ingess-controller,TEAM=Devops,PRODUCT=EKS,ENVIRONMENT=PRODUCTION" - # health check interval - service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: '5' - # ELB time out - service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: '3' - # health check unhealthy threshold - service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: '2' - # healthy threshold - service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: '2' - # connection draining - service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" - -spec: - type: LoadBalancer - selector: - app: ingress-eksprivate-nginx - ports: - - name: http - port: 80 - targetPort: http - - name: https - port: 443 - targetPort: http diff --git a/cluster-addons/ingress-controller/public-ingress.yaml b/cluster-addons/ingress-controller/public-ingress.yaml deleted file mode 100755 index 5d52609..0000000 --- a/cluster-addons/ingress-controller/public-ingress.yaml +++ /dev/null @@ -1,332 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: ingress-ekspublic-nginx ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: nginx-configuration - namespace: ingress-ekspublic-nginx - labels: - app: ingress-ekspublic-nginx ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - name: tcp-services - namespace: ingress-ekspublic-nginx ---- - -kind: ConfigMap -apiVersion: v1 -metadata: - name: udp-services - namespace: ingress-ekspublic-nginx ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nginx-ekspublic-serviceaccount - namespace: ingress-ekspublic-nginx - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: nginx-ekspublic-clusterrole -rules: - - apiGroups: - - "" - resources: - - configmaps - - endpoints - - nodes - - pods - - secrets - verbs: - - list - - watch - - update - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - apiGroups: - - "extensions" - resources: - - ingresses - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - apiGroups: - - "extensions" - resources: - - ingresses/status - verbs: - - update - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: nginx-ekspublic-role - namespace: ingress-ekspublic-nginx -rules: - - apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - namespaces - verbs: - - get - - apiGroups: - - "" - resources: - - configmaps - resourceNames: - # Defaults to "-" - # Here: "-" - # This has to be adapted if you change either parameter - # when launching the nginx-ingress-controller. 
- - "ingress-controller-leader-nginx" - verbs: - - get - - update - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: nginx-ekspublic-role-nisa-binding - namespace: ingress-ekspublic-nginx -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ekspublic-role -subjects: - - kind: ServiceAccount - name: nginx-ekspublic-serviceaccount - namespace: ingress-ekspublic-nginx - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: nginx-ekspublic-clusterrole-nisa-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ekspublic-clusterrole -subjects: - - kind: ServiceAccount - name: nginx-ekspublic-serviceaccount - namespace: ingress-ekspublic-nginx ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: default-eks-http-backend - labels: - app: default-eks-http-backend - namespace: ingress-ekspublic-nginx -spec: - replicas: 2 - selector: - matchLabels: - app: default-eks-http-backend - template: - metadata: - labels: - app: default-eks-http-backend - spec: - terminationGracePeriodSeconds: 60 - containers: - - name: default-eks-http-backend - # Any image is permissible as long as: - # 1. It serves a 404 page at / - # 2. It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.4 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi ---- - -apiVersion: v1 -kind: Service -metadata: - name: default-eks-http-backend - namespace: ingress-ekspublic-nginx - labels: - app: default-eks-http-backend -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: default-eks-http-backend - ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nginx-ekspublic-controller - namespace: ingress-ekspublic-nginx -spec: - replicas: 2 - selector: - matchLabels: - app: ingress-ekspublic-nginx - template: - metadata: - labels: - app: ingress-ekspublic-nginx - annotations: - prometheus.io/port: '10254' - prometheus.io/scrape: 'true' - spec: - serviceAccountName: nginx-ekspublic-serviceaccount - containers: - - name: nginx-ingress-controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.17.1 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-eks-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-configuration - - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services - - --udp-services-configmap=$(POD_NAMESPACE)/udp-services - - --publish-service=$(POD_NAMESPACE)/ingress-ekspublic-nginx - - --annotations-prefix=nginx.ingress.kubernetes.io - - --ingress-class=public-nginx - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - periodSeconds: 10 - successThreshold: 1 - 
timeoutSeconds: 1 - securityContext: - runAsNonRoot: false ---- -kind: Service -apiVersion: v1 -metadata: - name: ingress-ekspublic-nginx - namespace: ingress-ekspublic-nginx - labels: - app: ingress-ekspublic-nginx - annotations: - # Specifies whether cross-zone load balancing is enabled for the load balancer - service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" - # Expose this as internal load balancer - # service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0" - # replace with the correct value of the generated certificate in the AWS console Mediaiqdigital.com - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:eu-central-1:230367374156:certificate/d5ea0711-b37c-49de-a407-f5857f09d229" - # the backend instances are HTTP - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - # Map port 443 - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" - # Increase the ELB idle timeout to avoid issues with WebSockets or Server-Sent Events. - service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' - # tags for ELB - service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "Name=eks-public-ingress-controller,TEAM=Devops,PRODUCT=EKS,ENVRIONMENT=PRODUCTION" - # health check interval - service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: '5' - # ELB time out - service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: '3' - # health check unhealthy threshold - service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: '2' - # healthy threshold - service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: '2' - # connection draining - service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true" - -spec: - type: LoadBalancer - selector: - app: ingress-ekspublic-nginx - ports: - - name: http - port: 80 - targetPort: http - - name: https - port: 443 - targetPort: http diff --git a/cluster-addons/kube2iam/kube2iam.yaml b/cluster-addons/kube2iam/kube2iam.yaml deleted file mode 100644 index fdaef81..0000000 --- a/cluster-addons/kube2iam/kube2iam.yaml +++ /dev/null @@ -1,76 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube2iam - namespace: kube-system ---- -apiVersion: v1 -items: - - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: kube2iam - rules: - - apiGroups: [""] - resources: ["namespaces","pods"] - verbs: ["get","watch","list"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: kube2iam - subjects: - - kind: ServiceAccount - name: kube2iam - namespace: kube-system - roleRef: - kind: ClusterRole - name: kube2iam - apiGroup: rbac.authorization.k8s.io -kind: List ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kube2iam - namespace: kube-system - labels: - app: kube2iam -spec: - selector: - matchLabels: - name: kube2iam - template: - metadata: - labels: - name: kube2iam - spec: - serviceAccountName: kube2iam - hostNetwork: true - containers: - - image: jtblin/kube2iam:latest # instead of latest use 0.10.4 to fix iam cred 500 error - imagePullPolicy: Always - name: kube2iam - args: - - "--auto-discover-base-arn" - - "--auto-discover-default-role=true" - - "--iptables=true" - - "--host-ip=$(HOST_IP)" - - "--node=$(NODE_NAME)" - - "--host-interface=eni+" - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - 
ports: - - containerPort: 8181 - hostPort: 8181 - name: http - securityContext: - privileged: true - tolerations: - - operator: Exists diff --git a/cluster-addons/metrics-server/aggregated-metrics-reader.yaml b/cluster-addons/metrics-server/aggregated-metrics-reader.yaml deleted file mode 100755 index cdf3415..0000000 --- a/cluster-addons/metrics-server/aggregated-metrics-reader.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:aggregated-metrics-reader - labels: - rbac.authorization.k8s.io/aggregate-to-view: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: -- apiGroups: ["metrics.k8s.io"] - resources: ["pods"] - verbs: ["get", "list", "watch"] diff --git a/cluster-addons/metrics-server/auth-delegator.yaml b/cluster-addons/metrics-server/auth-delegator.yaml deleted file mode 100755 index e3442c5..0000000 --- a/cluster-addons/metrics-server/auth-delegator.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: metrics-server:system:auth-delegator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system diff --git a/cluster-addons/metrics-server/auth-reader.yaml b/cluster-addons/metrics-server/auth-reader.yaml deleted file mode 100755 index f0616e1..0000000 --- a/cluster-addons/metrics-server/auth-reader.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: metrics-server-auth-reader - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system diff --git a/cluster-addons/metrics-server/metrics-apiservice.yaml b/cluster-addons/metrics-server/metrics-apiservice.yaml deleted file mode 100755 index 08b0530..0000000 --- a/cluster-addons/metrics-server/metrics-apiservice.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.metrics.k8s.io -spec: - service: - name: metrics-server - namespace: kube-system - group: metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 diff --git a/cluster-addons/metrics-server/metrics-server-deployment.yaml b/cluster-addons/metrics-server/metrics-server-deployment.yaml deleted file mode 100755 index 3c98cfa..0000000 --- a/cluster-addons/metrics-server/metrics-server-deployment.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metrics-server - namespace: kube-system ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: metrics-server - namespace: kube-system - labels: - k8s-app: metrics-server -spec: - selector: - matchLabels: - k8s-app: metrics-server - template: - metadata: - name: metrics-server - labels: - k8s-app: metrics-server - spec: - serviceAccountName: metrics-server - volumes: - # mount in tmp so we can safely use from-scratch images and/or read-only containers - - name: tmp-dir - emptyDir: {} - containers: - - name: metrics-server - image: k8s.gcr.io/metrics-server-amd64:v0.3.1 - imagePullPolicy: Always - command: - - /metrics-server - - --metric-resolution=30s - - 
--kubelet-insecure-tls - - --kubelet-preferred-address-types=InternalIP - volumeMounts: - - name: tmp-dir - mountPath: /tmp - diff --git a/cluster-addons/metrics-server/metrics-server-service.yaml b/cluster-addons/metrics-server/metrics-server-service.yaml deleted file mode 100755 index 082b00c..0000000 --- a/cluster-addons/metrics-server/metrics-server-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: metrics-server - namespace: kube-system - labels: - kubernetes.io/name: "Metrics-server" -spec: - selector: - k8s-app: metrics-server - ports: - - port: 443 - protocol: TCP - targetPort: 443 diff --git a/cluster-addons/metrics-server/resource-reader.yaml b/cluster-addons/metrics-server/resource-reader.yaml deleted file mode 100755 index 574efc5..0000000 --- a/cluster-addons/metrics-server/resource-reader.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:metrics-server -rules: -- apiGroups: - - "*" - resources: - - pods - - nodes - - nodes/stats - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:metrics-server -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:metrics-server -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system diff --git a/eks-cluster.tf b/eks-cluster.tf index 7ba86a5..1373975 100755 --- a/eks-cluster.tf +++ b/eks-cluster.tf @@ -5,8 +5,8 @@ # * EKS Cluster # -resource "aws_iam_role" "frankfurt-cluster" { - name = "terraform-eks-frankfurt-cluster" +resource "aws_iam_role" "eks-cluster" { + name = "terraform-eks-cluster" assume_role_policy = <
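
The `assume_role_policy` heredoc for `aws_iam_role.eks-cluster` is truncated in the patch above. For reference only, a minimal sketch of the trust policy an EKS cluster role typically carries is shown below; the `POLICY` delimiter and the JSON body are assumptions, not taken from this repository's `eks-cluster.tf`.

```hcl
# Sketch under assumptions: the standard EKS control-plane trust policy.
# Resource and role names match the patch above; the heredoc body is illustrative,
# since the actual contents are truncated in this patch.
resource "aws_iam_role" "eks-cluster" {
  name = "terraform-eks-cluster"

  # Allow the EKS service to assume this role on behalf of the cluster.
  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "eks.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}
```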