Skip to content

Commit

Permalink
Merge pull request #64 from opzkit/switch-provider
Browse files Browse the repository at this point in the history
feat: migrate to clayrisser/kops
  • Loading branch information
argoyle authored Nov 30, 2023
2 parents 6787b74 + 84d1c9c commit 7334353
Show file tree
Hide file tree
Showing 7 changed files with 88 additions and 77 deletions.
4 changes: 2 additions & 2 deletions examples/additional_nodes/provider.tf
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ provider "aws" {
terraform {
required_providers {
kops = {
source = "eddycharly/kops"
version = "1.25.3"
source = "clayrisser/kops"
version = "1.28.0"
}

aws = {
Expand Down
4 changes: 2 additions & 2 deletions examples/basic/provider.tf
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ provider "aws" {
terraform {
required_providers {
kops = {
source = "eddycharly/kops"
version = "1.25.3"
source = "clayrisser/kops"
version = "1.28.0"
}

aws = {
Expand Down
4 changes: 2 additions & 2 deletions examples/policies/provider.tf
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ provider "aws" {
terraform {
required_providers {
kops = {
source = "eddycharly/kops"
version = "1.25.3"
source = "clayrisser/kops"
version = "1.28.0"
}

aws = {
Expand Down
123 changes: 61 additions & 62 deletions k8s.tf
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,63 @@ resource "aws_s3_object" "addons" {
resource "kops_cluster" "k8s" {
name = var.name
admin_ssh_key = var.admin_ssh_key != null ? file(var.admin_ssh_key) : null
config_store {
base = "s3://${var.bucket_state_store.id}/${var.name}"
}
cloud_provider {
aws {}
aws {
load_balancer_controller {
enabled = true
}

node_termination_handler {
enable_prometheus_metrics = false
enable_scheduled_event_draining = false
enable_spot_interruption_draining = var.node_termination_handler_sqs
enabled = true
enable_sqs_termination_draining = var.node_termination_handler_sqs
managed_asg_tag = var.node_termination_handler_sqs ? "aws-node-termination-handler/managed" : null
enable_rebalance_draining = var.enable_rebalance_draining
enable_rebalance_monitoring = var.enable_rebalance_monitoring
}

pod_identity_webhook {
enabled = true
replicas = local.min_number_of_nodes > 1 ? 2 : 1
}
}
}
channel = "stable"
kubernetes_version = var.kubernetes_version
dns_zone = var.dns_zone
network_id = var.vpc_id

networking {
network_id = var.vpc_id

topology {
dns = "Public"
}

dynamic "subnet" {
for_each = var.private_subnet_ids
content {
name = "private-${var.region}${subnet.key}"
id = subnet.value
type = "Private"
zone = "${var.region}${subnet.key}"
}
}

dynamic "subnet" {
for_each = var.public_subnet_ids
content {
name = "utility-${var.region}${subnet.key}"
id = subnet.value
type = "Utility"
zone = "${var.region}${subnet.key}"
}
}

dynamic "cilium" {
for_each = lookup(local.allowed_cnis, "cilium")
content {
Expand All @@ -45,35 +93,6 @@ resource "kops_cluster" "k8s" {
}
}

topology {
masters = local.topology
nodes = local.topology

dns {
type = "Public"
}
}

dynamic "subnet" {
for_each = var.private_subnet_ids
content {
name = "private-${var.region}${subnet.key}"
provider_id = subnet.value
type = "Private"
zone = "${var.region}${subnet.key}"
}
}

dynamic "subnet" {
for_each = var.public_subnet_ids
content {
name = "utility-${var.region}${subnet.key}"
provider_id = subnet.value
type = "Utility"
zone = "${var.region}${subnet.key}"
}
}

dynamic "etcd_cluster" {
for_each = [
"main",
Expand All @@ -93,16 +112,13 @@ resource "kops_cluster" "k8s" {
}
}

kubernetes_api_access = [
"0.0.0.0/0"
]
ssh_access = [
"0.0.0.0/0"
]

additional_policies = {
master = length(local.master_policies) == 0 ? null : jsonencode(local.master_policies)
node = length(local.node_policies) == 0 ? null : jsonencode(local.node_policies)
control-plane = length(local.control_plane_policies) == 0 ? null : jsonencode(local.control_plane_policies)
node = length(local.node_policies) == 0 ? null : jsonencode(local.node_policies)
}

iam {
Expand All @@ -122,6 +138,7 @@ resource "kops_cluster" "k8s" {
}

api {
public_name = "api.${var.name}"
dns {}
dynamic "load_balancer" {
for_each = var.api_loadbalancer ? [1] : []
Expand All @@ -130,6 +147,9 @@ resource "kops_cluster" "k8s" {
class = "Network"
}
}
access = [
"0.0.0.0/0"
]
}

authentication {
Expand All @@ -150,10 +170,6 @@ resource "kops_cluster" "k8s" {
rbac {}
}

aws_load_balancer_controller {
enabled = true
}

cert_manager {
enabled = true
managed = true
Expand Down Expand Up @@ -186,22 +202,6 @@ resource "kops_cluster" "k8s" {
insecure = false
}

node_termination_handler {
enable_prometheus_metrics = false
enable_scheduled_event_draining = false
enable_spot_interruption_draining = var.node_termination_handler_sqs
enabled = true
enable_sqs_termination_draining = var.node_termination_handler_sqs
managed_asg_tag = var.node_termination_handler_sqs ? "aws-node-termination-handler/managed" : null
enable_rebalance_draining = var.enable_rebalance_draining
enable_rebalance_monitoring = var.enable_rebalance_monitoring
}

pod_identity_webhook {
enabled = true
replicas = local.min_number_of_nodes > 1 ? 2 : 1
}

service_account_issuer_discovery {
discovery_store = "s3://${aws_s3_bucket.issuer.bucket}"
enable_aws_oidc_provider = true
Expand All @@ -216,7 +216,7 @@ resource "kops_cluster" "k8s" {
}

external_policies {
key = "master"
key = "control-plane"
value = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
}

Expand All @@ -229,8 +229,8 @@ resource "kops_cluster" "k8s" {
resource "kops_instance_group" "masters" {
for_each = toset(local.master_subnets_zones)
cluster_name = kops_cluster.k8s.id
name = "master-${var.region}${each.key}"
role = "Master"
name = "${var.control_plane_prefix}-${var.region}${each.key}"
role = "ControlPlane"
image = var.master_image != null ? var.master_image : var.image
min_size = 1
max_size = 1
Expand All @@ -249,7 +249,7 @@ resource "kops_instance_group" "masters" {
"${local.node_group_subnet_prefix}${each.key}"
]
node_labels = {
"kops.k8s.io/instancegroup" = "master-${var.region}${each.key}"
"kops.k8s.io/instancegroup" = "${var.control_plane_prefix}-${var.region}${each.key}"
}
depends_on = [
kops_cluster.k8s
Expand All @@ -258,7 +258,7 @@ resource "kops_instance_group" "masters" {
instance_metadata {
http_put_response_hop_limit = 3
}
max_instance_lifetime = var.master_max_instance_lifetime_hours != null ? "${var.master_max_instance_lifetime_hours + parseint(sha1("master-${var.region}${each.key}"), 16) % 10}h0m0s" : null
max_instance_lifetime = var.master_max_instance_lifetime_hours != null ? "${var.master_max_instance_lifetime_hours + parseint(sha1("${var.control_plane_prefix}-${var.region}${each.key}"), 16) % 10}h0m0s" : null
}

resource "kops_instance_group" "nodes" {
Expand Down Expand Up @@ -338,7 +338,6 @@ resource "kops_instance_group" "additional_nodes" {
max_instance_lifetime = each.value.max_instance_lifetime_hours != null ? "${each.value.max_instance_lifetime_hours + parseint(sha1("nodes-${each.key}"), 16) % 10}h0m0s" : null
}


resource "kops_cluster_updater" "k8s_updater" {
cluster_name = kops_cluster.k8s.id

Expand Down
12 changes: 6 additions & 6 deletions locals.tf
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
locals {
master_policies_aws_loadbalancer = {
control_plane_policies_aws_loadbalancer = {
Action = [
"acm:ListCertificates",
"acm:DescribeCertificate",
]
Effect = "Allow"
Resource = "*"
}
master_policy_addon_bucket_access = {
control_plane_policy_addon_bucket_access = {
Effect : "Allow",
Action : [
"s3:GetObject"
Expand All @@ -16,9 +16,10 @@ locals {
"${var.bucket_state_store.arn}/${var.name}-addons/*"
]
}
master_policies = flatten([
local.master_policies_aws_loadbalancer,
local.master_policy_addon_bucket_access,
control_plane_policies = flatten([
local.control_plane_policies_aws_loadbalancer,
local.control_plane_policy_addon_bucket_access,
var.control_plane_policies,
var.master_policies
]
)
Expand Down Expand Up @@ -56,7 +57,6 @@ locals {

private_subnets_enabled = length(var.private_subnet_ids) > 0
node_group_subnet_prefix = local.private_subnets_enabled ? "private-${var.region}" : "utility-${var.region}"
topology = local.private_subnets_enabled ? "private" : "public"
master_subnets_zones = local.private_subnets_enabled ? slice(keys(var.private_subnet_ids), 0, var.master_count) : slice(keys(var.public_subnet_ids), 0, var.master_count)

min_nodes = tomap({ for k, v in var.public_subnet_ids : k => lookup(var.node_size, k, local.min_max_node_default).min })
Expand Down
4 changes: 2 additions & 2 deletions providers.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@ terraform {
version = "~> 5.0"
}
kops = {
source = "eddycharly/kops"
version = "~> 1.25.3"
source = "clayrisser/kops"
version = "1.28.0"
}
}
required_version = ">= 1.3.0"
Expand Down
14 changes: 13 additions & 1 deletion vars.tf
Original file line number Diff line number Diff line change
Expand Up @@ -166,10 +166,16 @@ variable "iam_role_mappings" {
description = "The IAM role arn that will be allowed access with a ClusterRole to the Kubernetes cluster. Mapping from IAM ARN => Kubernetes ClusterRole"
}

# Additional IAM policy statements to attach to control-plane nodes.
# Merged (via flatten) into local.control_plane_policies together with the
# deprecated var.master_policies, then rendered into the cluster's
# additional_policies["control-plane"] entry.
variable "control_plane_policies" {
type = any
default = []
description = "Additional control plane policies, https://kops.sigs.k8s.io/iam_roles/#adding-additional-policies"
}

variable "master_policies" {
type = any
default = []
description = "Additional master policies, https://kops.sigs.k8s.io/iam_roles/#adding-additional-policies"
description = "Deprecated, use control_plane_policies instead."
}

variable "node_policies" {
Expand Down Expand Up @@ -250,3 +256,9 @@ variable "alb_ssl_policy" {
default = null
description = "SSL policy to use for ALB, https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/ingress/annotations/#ssl-policy"
}

# Name prefix for control-plane instance groups. Used to build the
# kops_instance_group.masters name ("<prefix>-<region><zone>"), the matching
# "kops.k8s.io/instancegroup" node label, and the sha1-based stagger applied
# to max_instance_lifetime in k8s.tf. Changing it renames the instance groups,
# which forces their replacement — TODO confirm before altering on a live cluster.
variable "control_plane_prefix" {
type = string
default = "control-plane"
description = "Prefix of control plane instance groups"
}

0 comments on commit 7334353

Please sign in to comment.