diff --git a/Dockerfile b/Dockerfile
index 0f95bd3a8b..0ff33716e2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -97,7 +97,7 @@ RUN CHART_VERSION="1.9.808" CHART_FILE=/charts/rke2-cilium.yam
 RUN CHART_VERSION="v3.19.1-build2021061107" CHART_FILE=/charts/rke2-canal.yaml CHART_BOOTSTRAP=true /charts/build-chart.sh
 RUN CHART_VERSION="v3.1907" CHART_FILE=/charts/rke2-calico.yaml CHART_BOOTSTRAP=true /charts/build-chart.sh
 RUN CHART_VERSION="v1.0.007" CHART_FILE=/charts/rke2-calico-crd.yaml CHART_BOOTSTRAP=true /charts/build-chart.sh
-RUN CHART_VERSION="1.16.201-build2021072301" CHART_FILE=/charts/rke2-coredns.yaml CHART_BOOTSTRAP=true /charts/build-chart.sh
+RUN CHART_VERSION="1.16.201-build2021072303" CHART_FILE=/charts/rke2-coredns.yaml CHART_BOOTSTRAP=true /charts/build-chart.sh
 RUN CHART_VERSION="3.34.002" CHART_FILE=/charts/rke2-ingress-nginx.yaml CHART_BOOTSTRAP=false /charts/build-chart.sh
 RUN CHART_VERSION="v1.21.3-rke2r1-build2021072101" \
     CHART_PACKAGE="rke2-kube-proxy-1.21" CHART_FILE=/charts/rke2-kube-proxy.yaml CHART_BOOTSTRAP=true /charts/build-chart.sh
diff --git a/bundle/bin/rke2-killall.sh b/bundle/bin/rke2-killall.sh
index ded53674c7..39064161c6 100755
--- a/bundle/bin/rke2-killall.sh
+++ b/bundle/bin/rke2-killall.sh
@@ -73,5 +73,15 @@ ip link delete flannel.1
 ip link delete vxlan.calico
 ip link delete cilium_vxlan
 ip link delete cilium_net
+
+# Delete the objects created by the nodelocal DNS cache
+if [ -d /sys/class/net/nodelocaldns ]; then
+    for i in $(ip address show nodelocaldns | grep inet | awk '{print $2}');
+    do
+        iptables-save | grep -v "$i" | iptables-restore
+    done
+    ip link delete nodelocaldns
+fi
+
 rm -rf /var/lib/cni/
 iptables-save | grep -v KUBE- | grep -v CNI- | grep -v cali- | grep -v cali: | grep -v CILIUM_ | iptables-restore
diff --git a/docs/networking.md b/docs/networking.md
index 766a8eb06e..e67e5b3fa5 100644
--- a/docs/networking.md
+++ b/docs/networking.md
@@ -18,6 +18,41 @@ If you don't install CoreDNS, you will need to install a cluster DNS provider yo
 
 CoreDNS is deployed with the [autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler) by default. To disable it or change its config, use the [HelmChartConfig](https://docs.rke2.io/helm/#customizing-packaged-components-with-helmchartconfig) resource.
 
+### NodeLocal DNSCache
+
+[NodeLocal DNSCache](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) improves performance by running a DNS caching agent on each node. To activate this feature, apply the following HelmChartConfig:
+
+```yaml
+---
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-coredns
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    nodelocal:
+      enabled: true
+```
+The Helm controller will redeploy CoreDNS with the new config. Please be aware that nodelocal modifies the node's iptables rules to intercept DNS traffic. Therefore, activating and then deactivating this feature without redeploying will cause the DNS service to stop working.
+
+Note that NodeLocal DNSCache must be deployed in ipvs mode if kube-proxy is running in that mode. To deploy it in ipvs mode, apply the following HelmChartConfig:
+
+```yaml
+---
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-coredns
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    nodelocal:
+      enabled: true
+      ipvs: true
+```
+
+
 ## Nginx Ingress Controller
 
 [nginx-ingress](https://github.com/kubernetes/ingress-nginx) is an Ingress controller powered by NGINX that uses a ConfigMap to store the NGINX configuration.
diff --git a/scripts/airgap/dnsNodeCache-test.yaml b/scripts/airgap/dnsNodeCache-test.yaml
new file mode 100644
index 0000000000..9289bc28cb
--- /dev/null
+++ b/scripts/airgap/dnsNodeCache-test.yaml
@@ -0,0 +1,9 @@
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-coredns
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    nodelocal:
+      enabled: true
diff --git a/scripts/build-images b/scripts/build-images
index 0da0850fa3..c39bf4f892 100755
--- a/scripts/build-images
+++ b/scripts/build-images
@@ -15,6 +15,8 @@ xargs -n1 -t docker image pull --quiet << EOF >> build/images-core.txt
     ${REGISTRY}/rancher/hardened-kubernetes:${KUBERNETES_IMAGE_TAG}
     ${REGISTRY}/rancher/hardened-coredns:v1.8.3-build20210720
     ${REGISTRY}/rancher/mirrored-cluster-proportional-autoscaler:1.8.3
+    ${REGISTRY}/rancher/mirrored-k8s-dns-node-cache:1.15.13
+    ${REGISTRY}/rancher/library-busybox:1.32.1
     ${REGISTRY}/rancher/hardened-etcd:${ETCD_VERSION}-${IMAGE_BUILD_VERSION}
     ${REGISTRY}/rancher/hardened-k8s-metrics-server:v0.3.6-${IMAGE_BUILD_VERSION}
     ${REGISTRY}/rancher/klipper-helm:v0.6.1-build20210616
diff --git a/scripts/test-helpers b/scripts/test-helpers
index c3fd5a4bfd..6b59fc5d31 100755
--- a/scripts/test-helpers
+++ b/scripts/test-helpers
@@ -68,8 +68,7 @@ pod-ready() {
 }
 export -f pod-ready
 
-# ---
-
+# -- wait-for-services() accepts container names as input and checks if they are ready. It does not check Kubernetes services!
 wait-for-services() {
     for service in $@; do
         while [[ "$(pod-ready $service | sort -u)" != 'true' ]]; do
diff --git a/scripts/test-run-basics b/scripts/test-run-basics
index ad58bb33da..10c0b947c5 100755
--- a/scripts/test-run-basics
+++ b/scripts/test-run-basics
@@ -19,6 +19,7 @@ export WAIT_SERVICES="${all_services[@]}"
 
 start-test() {
     #docker exec $(cat $TEST_DIR/servers/1/metadata/name) check-config || true
+    use-nodelocal-DNSCache
     verify-valid-versions $(cat $TEST_DIR/servers/1/metadata/name)
     verify-airgap-images $(cat $TEST_DIR/{servers,agents}/*/metadata/name)
 }
@@ -44,5 +45,14 @@ verify-airgap-images() {
 }
 export -f verify-airgap-images
+
+# -- Enable the nodelocal DNSCache so that its images are used
+use-nodelocal-DNSCache() {
+    local DNS_nodeCache_manifest='scripts/airgap/dnsNodeCache-test.yaml'
+    kubectl apply -f $DNS_nodeCache_manifest
+    wait-for-services node-cache
+}
+export -f use-nodelocal-DNSCache
+
 
 # --- create a basic cluster and check for valid versions
 LABEL=BASICS run-test
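A quick way to sanity-check the feature after applying the HelmChartConfig from docs/networking.md is sketched below. This is not part of the patch: the `node-cache` pod name, the `nodelocaldns` dummy interface (the same one cleaned up in rke2-killall.sh above), and the link-local listen address `169.254.20.10` are assumptions taken from the upstream NodeLocal DNSCache defaults and may differ in the rke2-coredns chart.

```sh
#!/bin/sh
# Hedged verification sketch (not part of the patch); assumes kubectl access
# and the upstream NodeLocal DNSCache defaults noted above.
export KUBECONFIG=/etc/rancher/rke2/rke2.yaml

# 1. A caching agent pod should be running on every node.
kubectl get pods -n kube-system -o wide | grep node-cache

# 2. On a node, the dummy interface created by the agent should exist and
#    carry the link-local listen address (169.254.20.10 is the upstream default).
ip address show nodelocaldns

# 3. The DNS intercept rules are ordinary iptables rules.
iptables-save | grep 169.254.20.10 | head

# 4. Cluster DNS should still resolve end to end from a throwaway pod.
kubectl run dns-check --rm -it --restart=Never \
    --image=rancher/library-busybox:1.32.1 -- nslookup kubernetes.default
```

The busybox image used in the last step is the same rancher/library-busybox:1.32.1 added to build/images-core.txt by this change, so the check should also work in the airgap test environment.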