From c0e27cd9a4aad5353383454cdbc73f0bc87fcc41 Mon Sep 17 00:00:00 2001
From: nadav buchman
Date: Wed, 8 May 2024 08:48:08 +0300
Subject: [PATCH] minor fixes for the example

---
 charts/karpenter_nodes/Chart.yaml             |   2 +-
 .../examples/argocd_example.yaml              |   2 +-
 .../karpenter_nodes/examples/nodegroups.yaml  |  27 +-
 .../examples/output/output.yaml               | 231 +++++++++++++++++-
 charts/karpenter_nodes/examples/userdata.yaml |   4 +-
 5 files changed, 226 insertions(+), 40 deletions(-)

diff --git a/charts/karpenter_nodes/Chart.yaml b/charts/karpenter_nodes/Chart.yaml
index b73bca0..1d0304f 100644
--- a/charts/karpenter_nodes/Chart.yaml
+++ b/charts/karpenter_nodes/Chart.yaml
@@ -1,6 +1,6 @@
 apiVersion: v2
 name: karpenter_nodes
-version: 1.0.2
+version: 1.0.3
 description: A Helm chart for generating NodeClasses and NodePools for Karpenter
 maintainers:
   - name: nadavbuc
diff --git a/charts/karpenter_nodes/examples/argocd_example.yaml b/charts/karpenter_nodes/examples/argocd_example.yaml
index e520cd8..84b2a26 100644
--- a/charts/karpenter_nodes/examples/argocd_example.yaml
+++ b/charts/karpenter_nodes/examples/argocd_example.yaml
@@ -9,7 +9,7 @@ spec:
   sources:
     - repoURL: 'https://opensource.fiverr.com/public_charts/'
       chart: karpenter_nodes
-      targetRevision: 1.0.1
+      targetRevision: 1.0.3
       helm:
         valueFiles:
           - $values/karpenter_nodes/eks-dev/common.yaml
diff --git a/charts/karpenter_nodes/examples/nodegroups.yaml b/charts/karpenter_nodes/examples/nodegroups.yaml
index addaaef..2cdf1c0 100644
--- a/charts/karpenter_nodes/examples/nodegroups.yaml
+++ b/charts/karpenter_nodes/examples/nodegroups.yaml
@@ -48,10 +48,6 @@ nodeGroups:
     capacitySpread:
       start: 1
       end: 5
-    taints:
-      - key: "dedicated"
-        value: "canary"
-        effect: "NoSchedule"
   nodes-jobs:
     expireAfter: "Never"
     instances:
@@ -61,10 +57,6 @@ nodeGroups:
         - "8"
         - "16"
     consolidationPolicy: "WhenEmpty"
-    taints:
-      - key: "dedicated"
-        value: "jobs"
-        effect: "NoSchedule"
     blockDeviceMappings:
       - deviceName: /dev/xvda
         ebs:
@@ -84,10 +76,6 @@ nodeGroups:
       minGeneration: 7
       cores:
         - "8"
-    taints:
-      - key: "dedicated"
-        value: "ingress"
-        effect: "NoSchedule"
   nodes-monitoring:
     labels:
       prometheus-scrape: "true" #Not Real Use-case
@@ -98,12 +86,7 @@ nodeGroups:
       architecture: "multiarch"
       capacityType:
         - on-demand
-    taints:
-      - key: "dedicated"
-        value: "monitoring"
-        effect: "NoSchedule"
-    excludeFamilies:
-      - x1 #We dont have X in our types but i do want it to not exclude previously defined amd instances
+    excludeFamilies: []
   nodes-gpu:
     labels:
       gpu: "true"
@@ -120,8 +103,6 @@ nodeGroups:
       - key: "dedicated"
         value: "gpu"
         effect: "NoSchedule"
-
-
-
-
-
+  nodes-cilium-managed:
+    instances: {}
+    cilium: true
diff --git a/charts/karpenter_nodes/examples/output/output.yaml b/charts/karpenter_nodes/examples/output/output.yaml
index e29b675..b4d2e88 100644
--- a/charts/karpenter_nodes/examples/output/output.yaml
+++ b/charts/karpenter_nodes/examples/output/output.yaml
@@ -117,6 +117,112 @@ spec:
 # Source: karpenter_nodes/templates/nodeclass.yaml
 apiVersion: karpenter.k8s.aws/v1beta1
 kind: EC2NodeClass
+metadata:
+  name: "nodes-cilium-managed-amd64"
+spec:
+  role: eks_nodes_role
+  amiFamily: AL2
+  amiSelectorTerms:
+  subnetSelectorTerms:
+    - tags:
+        Name: eks-dev-eu-west-1a
+    - tags:
+        Name: eks-dev-eu-west-1b
+    - tags:
+        Name: eks-dev-eu-west-1c
+  securityGroupSelectorTerms:
+    - tags:
+        Name: eks-nodes
+    - tags:
+        Name: eks-dev
+  tags:
+    cluster: eks-dev
+    nodegroup: nodes-cilium-managed
+    component: eks-karpenter-nodes
+    created_by: helm
+    team: devops
+    managed_by: karpenter
+  blockDeviceMappings:
+    - deviceName: /dev/xvda
+      ebs:
+        deleteOnTermination: true
+        encrypted: true
+        iops: 3000
+        throughput: 125
+        volumeSize: 100Gi
+        volumeType: gp3
+  detailedMonitoring: false
+  associatePublicIPAddress: false
+  metadataOptions:
+    httpEndpoint: enabled
+    httpProtocolIPv6: disabled
+    httpPutResponseHopLimit: 2
+    httpTokens: required
+  userData: |
+    CLUSTER_NAME=eks-dev
+    INSTANCEGROUP=nodes-cilium-managed
+    INSTANCE_ID=`/usr/bin/ec2-metadata --instance-id | awk '{print $2}'`
+    ID_SUFFIX=`/usr/bin/ec2-metadata --instance-id | awk '{print substr($0,length-5,6)}'`
+    HOSTNAME="${CLUSTER_NAME}-${INSTANCEGROUP}-$ID_SUFFIX"
+    hostname $HOSTNAME
+    echo $HOSTNAME > /etc/hostname
+    aws ec2 create-tags --resources $INSTANCE_ID --tags=Key=Name,Value=$HOSTNAME
+    sed -i "s/127.0.0.1 [0-9a-z-]*\s*localhost/127.0.0.1 $HOSTNAME localhost/" /etc/hosts
+    # Sysctl changes
+    ## Disable IPv6
+    cat <<EOF> /etc/sysctl.d/10-disable-ipv6.conf
+    # disable ipv6 config
+    net.ipv6.conf.all.disable_ipv6 = 1
+    net.ipv6.conf.default.disable_ipv6 = 1
+    net.ipv6.conf.lo.disable_ipv6 = 1
+    EOF
+    ## Stolen from this guy: https://blog.codeship.com/running-1000-containers-in-docker-swarm/
+    cat <<EOF> /etc/sysctl.d/99-kube-net.conf
+    # Have a larger connection range available
+    net.ipv4.ip_local_port_range=1024 65000
+    # Reuse closed sockets faster
+    net.ipv4.tcp_tw_reuse=1
+    net.ipv4.tcp_fin_timeout=15
+    # The maximum number of "backlogged sockets". Default is 128.
+    net.core.somaxconn=4096
+    net.core.netdev_max_backlog=4096
+    # 16MB per socket - which sounds like a lot,
+    # but will virtually never consume that much.
+    net.core.rmem_max=16777216
+    net.core.wmem_max=16777216
+    # Various network tunables
+    net.ipv4.tcp_max_syn_backlog=20480
+    net.ipv4.tcp_max_tw_buckets=400000
+    net.ipv4.tcp_no_metrics_save=1
+    net.ipv4.tcp_rmem=4096 87380 16777216
+    net.ipv4.tcp_syn_retries=2
+    net.ipv4.tcp_synack_retries=2
+    net.ipv4.tcp_wmem=4096 65536 16777216
+    #vm.min_free_kbytes=65536
+    # Connection tracking to prevent dropped connections (usually issue on LBs)
+    net.netfilter.nf_conntrack_max=262144
+    net.ipv4.netfilter.ip_conntrack_generic_timeout=120
+    net.netfilter.nf_conntrack_tcp_timeout_established=86400
+    # ARP cache settings for a highly loaded docker swarm
+    net.ipv4.neigh.default.gc_thresh1=8096
+    net.ipv4.neigh.default.gc_thresh2=12288
+    net.ipv4.neigh.default.gc_thresh3=16384
+    EOF
+    systemctl restart systemd-sysctl.service
+    #Increase RegistryQPS
+    echo "$(jq '.registryPullQPS=100' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+    echo "$(jq '.registryBurst=200' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+    mkdir -p /etc/containerd/certs.d/docker.io
+    cat<<EOF>/etc/containerd/certs.d/docker.io/hosts.toml
+    server = "https://registry-1.docker.io"
+    [host."http://registry"]
+      capabilities = ["pull", "resolve"]
+      skip_verify = true
+    EOF
+---
+# Source: karpenter_nodes/templates/nodeclass.yaml
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
 metadata:
   name: "nodes-default-amd64"
 spec:
@@ -762,9 +868,6 @@ spec:
         - key: dedicated
           effect: NoSchedule
           value: nodes_canary
-        - key: dedicated
-          value: canary
-          effect: NoSchedule
       requirements:
         - key: "karpenter.k8s.aws/instance-category"
           operator: In
@@ -866,6 +969,118 @@ spec:
 # Source: karpenter_nodes/templates/nodepool.yaml
 apiVersion: karpenter.sh/v1beta1
 kind: NodePool
+metadata:
+  name: "nodes-cilium-managed-amd64"
+spec:
+  template:
+    metadata:
+      labels:
+        nodegroup: nodes-cilium-managed
+        cluster: eks-dev
+    spec:
+      nodeClassRef:
+        name: nodes-cilium-managed-amd64
+      taints:
+        - key: dedicated
+          effect: NoSchedule
+          value: nodes_cilium_managed
+      startupTaints:
+        - key: node.cilium.io/agent-not-ready
+          value: "true"
+          effect: NoExecute
+      requirements:
+        - key: "karpenter.k8s.aws/instance-category"
+          operator: In
+          values:
+            - m
+            - r
+            - c
+        - key: "karpenter.k8s.aws/instance-cpu"
+          operator: In
+          values:
+            - "4"
+            - "8"
+            - "12"
+            - "16"
+            - "24"
+            - "32"
+            - "48"
+        - key: karpenter.k8s.aws/instance-generation
+          operator: Gt
+          values:
+            - "3"
+        - key: "topology.kubernetes.io/zone"
+          operator: In
+          values:
+            - eu-west-1a
+            - eu-west-1b
+            - eu-west-1c
+        - key: "kubernetes.io/arch"
+          operator: In
+          values:
+            - amd64
+        - key: "karpenter.sh/capacity-type"
+          operator: In
+          values:
+            - spot
+        - key: kubernetes.io/os
+          operator: In
+          values:
+            - linux
+        - key: "karpenter.k8s.aws/instance-family"
+          operator: NotIn
+          values:
+            - c6a
+            - m6a
+            - r6a
+            - c5a
+            - m5a
+            - r5a
+            - c6ad
+            - m6ad
+            - r6ad
+            - m5ad
+            - r5ad
+            - r5ad
+        - key: "karpenter.k8s.aws/instance-size"
+          operator: NotIn
+          values:
+            - metal
+      kubelet:
+        systemReserved:
+          cpu: 250m
+          memory: 200Mi
+          ephemeral-storage: 2Gi
+        kubeReserved:
+          cpu: 250m
+          memory: 1Gi
+          ephemeral-storage: 4Gi
+        evictionHard:
+          memory.available: 768Mi
+          nodefs.available: 8%
+          nodefs.inodesFree: 8%
+        evictionSoft:
+          memory.available: 1280Mi
+          nodefs.available: 10%
+          nodefs.inodesFree: 15%
+          imagefs.available: 10%
+          imagefs.inodesFree: 10%
+          pid.available: 10%
+        evictionSoftGracePeriod:
+          imagefs.available: 10m0s
+          imagefs.inodesFree: 10m0s
+          memory.available: 5m0s
+          nodefs.available: 10m0s
+          nodefs.inodesFree: 10m0s
+          pid.available: 2m0s
+  disruption:
+    expireAfter: 720h
+    consolidationPolicy: WhenUnderutilized
+  weight: 1
+---
+# Source: karpenter_nodes/templates/nodepool.yaml
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
 metadata:
   name: "nodes-default-amd64"
 spec:
@@ -1199,9 +1414,6 @@ spec:
         - key: dedicated
           effect: NoSchedule
           value: nodes_ingress
-        - key: dedicated
-          value: ingress
-          effect: NoSchedule
       requirements:
         - key: "karpenter.k8s.aws/instance-category"
           operator: In
@@ -1305,9 +1517,6 @@ spec:
         - key: dedicated
           effect: NoSchedule
           value: nodes_jobs
-        - key: dedicated
-          value: jobs
-          effect: NoSchedule
       requirements:
         - key: "karpenter.k8s.aws/instance-category"
           operator: In
@@ -1413,9 +1622,6 @@ spec:
         - key: dedicated
           effect: NoSchedule
           value: nodes_monitoring
-        - key: dedicated
-          value: monitoring
-          effect: NoSchedule
       requirements:
         - key: "karpenter.k8s.aws/instance-category"
           operator: In
@@ -1459,7 +1665,6 @@ spec:
         - key: "karpenter.k8s.aws/instance-family"
           operator: NotIn
           values:
-            - x1
         - key: "karpenter.k8s.aws/instance-size"
           operator: NotIn
           values:
diff --git a/charts/karpenter_nodes/examples/userdata.yaml b/charts/karpenter_nodes/examples/userdata.yaml
index 9a30a5f..bd9ac6f 100644
--- a/charts/karpenter_nodes/examples/userdata.yaml
+++ b/charts/karpenter_nodes/examples/userdata.yaml
@@ -1,4 +1,4 @@
-registry: "registry-1.docker.io"
+registry: "https://registry-1.docker.io"
 registryCache: "true"
 registryHost: "http://registry"
 
@@ -59,7 +59,7 @@ userData: |
   {{- if eq ( .value.registryCache | default $.Values.registryCache ) "true" }}
   mkdir -p /etc/containerd/certs.d/docker.io
   cat<<EOF>/etc/containerd/certs.d/docker.io/hosts.toml
-  server = "https://registry-1.docker.io"
+  server = "{{ .value.registry | default $.Values.registry }}"
   [host."{{ .value.registryHost | default $.Values.registryHost }}"]
["pull", "resolve"] skip_verify = true