Upgrade Cilium to 1.16 #517

Merged
merged 1 commit into main from cilium-1.16 on Jan 1, 2025

Conversation

timtorChen
Owner

@timtorChen timtorChen commented Dec 31, 2024

Cilium 1.14 -> 1.15 -> 1.16 upgrade notes.

Cilium 1.15

Supports Kubernetes versions 1.26-1.29

  • Fully supports Kubernetes Gateway API 1.0
  • Adds the Hubble exporter feature to cilium-agent, which writes Hubble flows to files for later log consumption (a hedged Helm-values sketch follows the links below).

https://isovalent.com/blog/post/cilium-1-15/
https://docs.cilium.io/en/v1.15/operations/upgrade
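
The exporter is opt-in; roughly the following chart values switch it on. This is a hedged sketch: the hubble.export.static.* keys and the events.log path are assumed from the upstream chart defaults and are not part of this PR, whose rendered cilium-config below only picks up the hubble-export-file-max-size-mb and hubble-export-file-max-backups defaults.

hubble:
  enabled: true
  export:
    # Rotation settings; these surface as hubble-export-file-max-size-mb
    # and hubble-export-file-max-backups in the cilium-config ConfigMap.
    fileMaxSizeMb: 10
    fileMaxBackups: 5
    static:
      enabled: true                                # write all flows to a file
      filePath: /var/run/cilium/hubble/events.log  # assumed default path
      allowList: []
      denyList: []
      fieldMask: []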

Cilium 1.16

Supports Kubernetes versions 1.27-1.30

  • Envoy is separated from the Cilium agent DaemonSet and becomes a dedicated DaemonSet.
  • Supports the Kubernetes Service Traffic Distribution model, the successor to topology-aware routing (see the Service sketch after the links below).
  • Supports Kubernetes Gateway API 1.1

https://isovalent.com/blog/post/cilium-1-16/
https://docs.cilium.io/en/v1.16/operations/upgrade
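
For illustration only (nothing in this PR changes any Service), traffic distribution is requested per Service via spec.trafficDistribution once the cluster's Kubernetes version exposes that field, and Cilium 1.16's kube-proxy replacement then honors it. The Service name, selector, and ports below are made up. Note also that this repository keeps the embedded Envoy (the HelmRelease diff further down sets envoy.enabled: false), so the new dedicated Envoy DaemonSet is not rolled out here.

apiVersion: v1
kind: Service
metadata:
  name: example-backend      # hypothetical Service, for illustration
  namespace: default
spec:
  selector:
    app: example-backend
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
  # Successor to topology-aware routing: prefer endpoints in the client's
  # zone and fall back to cluster-wide endpoints when none are available.
  trafficDistribution: PreferClose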


--- HelmRelease: kube-system/cilium ServiceAccount: kube-system/hubble-relay

+++ HelmRelease: kube-system/cilium ServiceAccount: kube-system/hubble-relay

@@ -1,7 +1,8 @@

 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: hubble-relay
   namespace: kube-system
+automountServiceAccountToken: false
 
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-config

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-config

@@ -7,96 +7,129 @@

 data:
   identity-allocation-mode: crd
   identity-heartbeat-timeout: 30m0s
   identity-gc-interval: 15m0s
   cilium-endpoint-gc-interval: 5m0s
   nodes-gc-interval: 5m0s
-  skip-cnp-status-startup-clean: 'false'
   debug: 'false'
   debug-verbose: ''
   enable-policy: default
+  policy-cidr-match-mode: ''
   proxy-prometheus-port: '9964'
+  operator-prometheus-serve-addr: :9963
+  enable-metrics: 'true'
   enable-ipv4: 'true'
   enable-ipv6: 'false'
   custom-cni-conf: 'false'
-  enable-bpf-clock-probe: 'true'
+  enable-bpf-clock-probe: 'false'
   monitor-aggregation: medium
   monitor-aggregation-interval: 5s
   monitor-aggregation-flags: all
   bpf-map-dynamic-size-ratio: '0.0025'
   bpf-policy-map-max: '16384'
   bpf-lb-map-max: '65536'
   bpf-lb-external-clusterip: 'false'
+  bpf-events-drop-enabled: 'true'
+  bpf-events-policy-verdict-enabled: 'true'
+  bpf-events-trace-enabled: 'true'
   preallocate-bpf-maps: 'false'
-  sidecar-istio-proxy-image: cilium/istio_proxy
   cluster-name: default
   cluster-id: '0'
+  routing-mode: tunnel
+  tunnel-protocol: vxlan
+  service-no-backend-response: reject
   enable-l7-proxy: 'true'
   enable-ipv4-masquerade: 'true'
   enable-ipv4-big-tcp: 'false'
   enable-ipv6-big-tcp: 'false'
   enable-ipv6-masquerade: 'true'
+  enable-tcx: 'true'
+  datapath-mode: veth
+  enable-masquerade-to-route-source: 'false'
   enable-xt-socket-fallback: 'true'
   install-no-conntrack-iptables-rules: 'false'
   auto-direct-node-routes: 'false'
+  direct-routing-skip-unreachable: 'false'
   enable-local-redirect-policy: 'false'
-  kube-proxy-replacement: strict
+  enable-runtime-device-detection: 'true'
+  kube-proxy-replacement: 'true'
   kube-proxy-replacement-healthz-bind-address: ''
   bpf-lb-sock: 'false'
+  bpf-lb-sock-terminate-pod-connections: 'false'
+  nodeport-addresses: ''
   enable-health-check-nodeport: 'true'
+  enable-health-check-loadbalancer-ip: 'false'
   node-port-bind-protection: 'true'
   enable-auto-protect-node-port-range: 'true'
+  bpf-lb-acceleration: disabled
   enable-svc-source-range-check: 'true'
   enable-l2-neigh-discovery: 'true'
   arping-refresh-period: 30s
+  k8s-require-ipv4-pod-cidr: 'false'
+  k8s-require-ipv6-pod-cidr: 'false'
   enable-k8s-networkpolicy: 'true'
   write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
   cni-exclusive: 'true'
   cni-log-file: /var/run/cilium/cilium-cni.log
   enable-endpoint-health-checking: 'true'
   enable-health-checking: 'true'
   enable-well-known-identities: 'false'
-  enable-remote-node-identity: 'true'
+  enable-node-selector-labels: 'false'
   synchronize-k8s-nodes: 'true'
   operator-api-serve-addr: 127.0.0.1:9234
   enable-hubble: 'true'
   hubble-socket-path: /var/run/cilium/hubble.sock
+  hubble-export-file-max-size-mb: '10'
+  hubble-export-file-max-backups: '5'
   hubble-listen-address: :4244
   hubble-disable-tls: 'false'
   hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
   hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
   hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
   ipam: kubernetes
   ipam-cilium-node-update-rate: 15s
-  disable-cnp-status-updates: 'true'
-  cnp-node-status-gc-interval: 0s
+  egress-gateway-reconciliation-trigger-interval: 1s
   enable-vtep: 'false'
   vtep-endpoint: ''
   vtep-cidr: ''
   vtep-mask: ''
   vtep-mac: ''
-  enable-bgp-control-plane: 'false'
   procfs: /host/proc
   bpf-root: /sys/fs/bpf
   cgroup-root: /sys/fs/cgroup
   enable-k8s-terminating-endpoint: 'true'
   enable-sctp: 'false'
+  k8s-client-qps: '10'
+  k8s-client-burst: '20'
   remove-cilium-node-taints: 'true'
   set-cilium-node-taints: 'true'
   set-cilium-is-up-condition: 'true'
   unmanaged-pod-watcher-interval: '15'
+  dnsproxy-enable-transparent-mode: 'true'
+  dnsproxy-socket-linger-timeout: '10'
   tofqdns-dns-reject-response-code: refused
   tofqdns-enable-dns-compression: 'true'
   tofqdns-endpoint-max-ip-per-hostname: '50'
   tofqdns-idle-connection-grace-period: 0s
   tofqdns-max-deferred-connection-deletes: '10000'
   tofqdns-proxy-response-max-delay: 100ms
   agent-not-ready-taint-key: node.cilium.io/agent-not-ready
   mesh-auth-enabled: 'true'
   mesh-auth-queue-size: '1024'
   mesh-auth-rotated-identities-queue-size: '1024'
-  mesh-auth-expired-gc-interval: 15m0s
+  mesh-auth-gc-interval: 5m0s
+  proxy-xff-num-trusted-hops-ingress: '0'
+  proxy-xff-num-trusted-hops-egress: '0'
   proxy-connect-timeout: '2'
+  proxy-initial-fetch-timeout: '30'
   proxy-max-requests-per-connection: '0'
   proxy-max-connection-duration-seconds: '0'
+  proxy-idle-timeout-seconds: '60'
   external-envoy-proxy: 'false'
+  envoy-base-id: '0'
+  envoy-keep-cap-netbindservice: 'false'
+  max-connected-clusters: '255'
+  clustermesh-enable-endpoint-sync: 'false'
+  clustermesh-enable-mcs-api: 'false'
+  nat-map-stats-entries: '32'
+  nat-map-stats-interval: 30s
 
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-relay-config

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-relay-config

@@ -2,13 +2,13 @@

 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: hubble-relay-config
   namespace: kube-system
 data:
-  config.yaml: "cluster-name: default\npeer-service: \"hubble-peer.kube-system.svc.cluster.local:443\"\
+  config.yaml: "cluster-name: default\npeer-service: \"hubble-peer.kube-system.svc.cluster.local.:443\"\
     \nlisten-address: :4245\ngops: true\ngops-port: \"9893\"\ndial-timeout: \nretry-timeout:\
-    \ \nsort-buffer-len-max: \nsort-buffer-drain-timeout: \ntls-client-cert-file:\
-    \ /var/lib/hubble-relay/tls/client.crt\ntls-client-key-file: /var/lib/hubble-relay/tls/client.key\n\
-    tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\ndisable-server-tls:\
-    \ true\n"
+    \ \nsort-buffer-len-max: \nsort-buffer-drain-timeout: \ntls-hubble-client-cert-file:\
+    \ /var/lib/hubble-relay/tls/client.crt\ntls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key\n\
+    tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\n\n\
+    disable-server-tls: true\n"
 
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-ui-nginx

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-ui-nginx

@@ -15,8 +15,10 @@

     \ range,keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout;\n\
     \        if ($request_method = OPTIONS) {\n            return 204;\n        }\n\
     \        # /CORS\n\n        location /api {\n            proxy_http_version 1.1;\n\
     \            proxy_pass_request_headers on;\n            proxy_hide_header Access-Control-Allow-Origin;\n\
     \            proxy_pass http://127.0.0.1:8090;\n        }\n        location /\
     \ {\n            # double `/index.html` is required here \n            try_files\
-    \ $uri $uri/ /index.html /index.html;\n        }\n    }\n}"
+    \ $uri $uri/ /index.html /index.html;\n        }\n\n        # Liveness probe\n\
+    \        location /healthz {\n            access_log off;\n            add_header\
+    \ Content-Type text/plain;\n            return 200 'ok';\n        }\n    }\n}"
 
--- HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium

+++ HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium

@@ -44,12 +44,15 @@

   - get
 - apiGroups:
   - cilium.io
   resources:
   - ciliumloadbalancerippools
   - ciliumbgppeeringpolicies
+  - ciliumbgpnodeconfigs
+  - ciliumbgpadvertisements
+  - ciliumbgppeerconfigs
   - ciliumclusterwideenvoyconfigs
   - ciliumclusterwidenetworkpolicies
   - ciliumegressgatewaypolicies
   - ciliumendpoints
   - ciliumendpointslices
   - ciliumenvoyconfigs
@@ -93,14 +96,13 @@

   verbs:
   - get
   - update
 - apiGroups:
   - cilium.io
   resources:
-  - ciliumnetworkpolicies/status
-  - ciliumclusterwidenetworkpolicies/status
   - ciliumendpoints/status
   - ciliumendpoints
   - ciliuml2announcementpolicies/status
+  - ciliumbgpnodeconfigs/status
   verbs:
   - patch
 
--- HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium-operator

+++ HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium-operator

@@ -15,12 +15,20 @@

   - list
   - watch
   - delete
 - apiGroups:
   - ''
   resources:
+  - configmaps
+  resourceNames:
+  - cilium-config
+  verbs:
+  - patch
+- apiGroups:
+  - ''
+  resources:
   - nodes
   verbs:
   - list
   - watch
 - apiGroups:
   - ''
@@ -116,12 +124,15 @@

   - update
 - apiGroups:
   - cilium.io
   resources:
   - ciliumendpointslices
   - ciliumenvoyconfigs
+  - ciliumbgppeerconfigs
+  - ciliumbgpadvertisements
+  - ciliumbgpnodeconfigs
   verbs:
   - create
   - update
   - get
   - list
   - watch
@@ -142,12 +153,17 @@

   - customresourcedefinitions
   verbs:
   - update
   resourceNames:
   - ciliumloadbalancerippools.cilium.io
   - ciliumbgppeeringpolicies.cilium.io
+  - ciliumbgpclusterconfigs.cilium.io
+  - ciliumbgppeerconfigs.cilium.io
+  - ciliumbgpadvertisements.cilium.io
+  - ciliumbgpnodeconfigs.cilium.io
+  - ciliumbgpnodeconfigoverrides.cilium.io
   - ciliumclusterwideenvoyconfigs.cilium.io
   - ciliumclusterwidenetworkpolicies.cilium.io
   - ciliumegressgatewaypolicies.cilium.io
   - ciliumendpoints.cilium.io
   - ciliumendpointslices.cilium.io
   - ciliumenvoyconfigs.cilium.io
@@ -162,12 +178,15 @@

   - ciliumpodippools.cilium.io
 - apiGroups:
   - cilium.io
   resources:
   - ciliumloadbalancerippools
   - ciliumpodippools
+  - ciliumbgppeeringpolicies
+  - ciliumbgpclusterconfigs
+  - ciliumbgpnodeconfigoverrides
   verbs:
   - get
   - list
   - watch
 - apiGroups:
   - cilium.io
--- HelmRelease: kube-system/cilium Service: kube-system/hubble-relay

+++ HelmRelease: kube-system/cilium Service: kube-system/hubble-relay

@@ -12,8 +12,8 @@

   type: ClusterIP
   selector:
     k8s-app: hubble-relay
   ports:
   - protocol: TCP
     port: 80
-    targetPort: 4245
+    targetPort: grpc
 
--- HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium

+++ HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium

@@ -11,27 +11,29 @@

 spec:
   selector:
     matchLabels:
       k8s-app: cilium
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 2
+      maxSurge: 0
+      maxUnavailable: 1
     type: RollingUpdate
   template:
     metadata:
-      annotations:
-        container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
-        container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
+      annotations: null
       labels:
         k8s-app: cilium
         app.kubernetes.io/name: cilium-agent
         app.kubernetes.io/part-of: cilium
     spec:
+      securityContext:
+        appArmorProfile:
+          type: Unconfined
       containers:
       - name: cilium-agent
-        image: quay.io/cilium/cilium:v1.14.0-snapshot.4
+        image: quay.io/cilium/cilium:v1.16.5@sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d
         imagePullPolicy: IfNotPresent
         command:
         - cilium-agent
         args:
         - --config-dir=/tmp/cilium/config-map
         startupProbe:
@@ -43,12 +45,13 @@

             httpHeaders:
             - name: brief
               value: 'true'
           failureThreshold: 105
           periodSeconds: 2
           successThreshold: 1
+          initialDelaySeconds: 5
         livenessProbe:
           httpGet:
             host: 127.0.0.1
             path: /healthz
             port: 9879
             scheme: HTTP
@@ -82,17 +85,47 @@

           valueFrom:
             fieldRef:
               apiVersion: v1
               fieldPath: metadata.namespace
         - name: CILIUM_CLUSTERMESH_CONFIG
           value: /var/lib/cilium/clustermesh/
+        - name: GOMEMLIMIT
+          valueFrom:
+            resourceFieldRef:
+              resource: limits.memory
+              divisor: '1'
         - name: KUBERNETES_SERVICE_HOST
           value: localhost
         - name: KUBERNETES_SERVICE_PORT
           value: '7745'
         lifecycle:
+          postStart:
+            exec:
+              command:
+              - bash
+              - -c
+              - |
+                set -o errexit
+                set -o pipefail
+                set -o nounset
+
+                # When running in AWS ENI mode, it's likely that 'aws-node' has
+                # had a chance to install SNAT iptables rules. These can result
+                # in dropped traffic, so we should attempt to remove them.
+                # We do it using a 'postStart' hook since this may need to run
+                # for nodes which might have already been init'ed but may still
+                # have dangling rules. This is safe because there are no
+                # dependencies on anything that is part of the startup script
+                # itself, and can be safely run multiple times per node (e.g. in
+                # case of a restart).
+                if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
+                then
+                    echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
+                    iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
+                fi
+                echo 'Done!'
           preStop:
             exec:
               command:
               - /cni-uninstall.sh
         securityContext:
           seLinuxOptions:
@@ -140,16 +173,16 @@

           mountPath: /var/lib/cilium/tls/hubble
           readOnly: true
         - name: tmp
           mountPath: /tmp
       initContainers:
       - name: config
-        image: quay.io/cilium/cilium:v1.14.0-snapshot.4
-        imagePullPolicy: IfNotPresent
-        command:
-        - cilium
+        image: quay.io/cilium/cilium:v1.16.5@sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d
+        imagePullPolicy: IfNotPresent
+        command:
+        - cilium-dbg
         - build-config
         env:
         - name: K8S_NODE_NAME
           valueFrom:
             fieldRef:
               apiVersion: v1
@@ -164,14 +197,44 @@

         - name: KUBERNETES_SERVICE_PORT
           value: '7745'
         volumeMounts:
         - name: tmp
           mountPath: /tmp
         terminationMessagePolicy: FallbackToLogsOnError
+      - name: apply-sysctl-overwrites
+        image: quay.io/cilium/cilium:v1.16.5@sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: BIN_PATH
+          value: /opt/cni/bin
+        command:
+        - sh
+        - -ec
+        - |
+          cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
+          nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
+          rm /hostbin/cilium-sysctlfix
+        volumeMounts:
+        - name: hostproc
+          mountPath: /hostproc
+        - name: cni-path
+          mountPath: /hostbin
+        terminationMessagePolicy: FallbackToLogsOnError
+        securityContext:
+          seLinuxOptions:
+            level: s0
+            type: spc_t
+          capabilities:
+            add:
+            - SYS_ADMIN
+            - SYS_CHROOT
+            - SYS_PTRACE
+            drop:
+            - ALL
       - name: mount-bpf-fs
-        image: quay.io/cilium/cilium:v1.14.0-snapshot.4
+        image: quay.io/cilium/cilium:v1.16.5@sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d
         imagePullPolicy: IfNotPresent
         args:
         - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
         command:
         - /bin/bash
         - -c
@@ -181,13 +244,13 @@

           privileged: true
         volumeMounts:
         - name: bpf-maps
           mountPath: /sys/fs/bpf
           mountPropagation: Bidirectional
       - name: clean-cilium-state
-        image: quay.io/cilium/cilium:v1.14.0-snapshot.4
+        image: quay.io/cilium/cilium:v1.16.5@sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d
         imagePullPolicy: IfNotPresent
         command:
         - /init-container.sh
         env:
         - name: CILIUM_ALL_STATE
           valueFrom:
@@ -197,12 +260,18 @@

               optional: true
         - name: CILIUM_BPF_STATE
           valueFrom:
             configMapKeyRef:
               name: cilium-config
               key: clean-cilium-bpf-state
+              optional: true
+        - name: WRITE_CNI_CONF_WHEN_READY
+          valueFrom:
+            configMapKeyRef:
+              name: cilium-config
+              key: write-cni-conf-when-ready
               optional: true
         - name: KUBERNETES_SERVICE_HOST
           value: localhost
         - name: KUBERNETES_SERVICE_PORT
           value: '7745'
         terminationMessagePolicy: FallbackToLogsOnError
@@ -222,18 +291,14 @@

           mountPath: /sys/fs/bpf
         - name: cilium-cgroup
           mountPath: /sys/fs/cgroup
           mountPropagation: HostToContainer
         - name: cilium-run
           mountPath: /var/run/cilium
-        resources:
-          requests:
-            cpu: 100m
-            memory: 100Mi
       - name: install-cni-binaries
-        image: quay.io/cilium/cilium:v1.14.0-snapshot.4
+        image: quay.io/cilium/cilium:v1.16.5@sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d
         imagePullPolicy: IfNotPresent
         command:
         - /install-plugin.sh
         resources:
           requests:
             cpu: 100m
@@ -248,13 +313,12 @@

         terminationMessagePolicy: FallbackToLogsOnError
         volumeMounts:
         - name: cni-path
           mountPath: /host/opt/cni/bin
       restartPolicy: Always
       priorityClassName: system-node-critical
-      serviceAccount: cilium
       serviceAccountName: cilium
       automountServiceAccountToken: true
       terminationGracePeriodSeconds: 1
       hostNetwork: true
       affinity:
         podAntiAffinity:
@@ -275,12 +339,16 @@

           path: /var/run/cilium
           type: DirectoryOrCreate
       - name: bpf-maps
         hostPath:
           path: /sys/fs/bpf
           type: DirectoryOrCreate
+      - name: hostproc
+        hostPath:
+          path: /proc
+          type: Directory
       - name: cilium-cgroup
         hostPath:
           path: /sys/fs/cgroup
           type: DirectoryOrCreate
       - name: cni-path
         hostPath:
@@ -311,12 +379,22 @@

               - key: tls.key
                 path: common-etcd-client.key
               - key: tls.crt
                 path: common-etcd-client.crt
               - key: ca.crt
                 path: common-etcd-client-ca.crt
+          - secret:
+              name: clustermesh-apiserver-local-cert
+              optional: true
+              items:
+              - key: tls.key
+                path: local-etcd-client.key
+              - key: tls.crt
+                path: local-etcd-client.crt
+              - key: ca.crt
+                path: local-etcd-client-ca.crt
       - name: host-proc-sys-net
         hostPath:
           path: /proc/sys/net
           type: Directory
       - name: host-proc-sys-kernel
         hostPath:
--- HelmRelease: kube-system/cilium Deployment: kube-system/cilium-operator

+++ HelmRelease: kube-system/cilium Deployment: kube-system/cilium-operator

@@ -19,22 +19,24 @@

     rollingUpdate:
       maxSurge: 25%
       maxUnavailable: 50%
     type: RollingUpdate
   template:
     metadata:
-      annotations: null
+      annotations:
+        prometheus.io/port: '9963'
+        prometheus.io/scrape: 'true'
       labels:
         io.cilium/app: operator
         name: cilium-operator
         app.kubernetes.io/part-of: cilium
         app.kubernetes.io/name: cilium-operator
     spec:
       containers:
       - name: cilium-operator
-        image: quay.io/cilium/operator-generic:v1.14.0-snapshot.4
+        image: quay.io/cilium/operator-generic:v1.16.5@sha256:f7884848483bbcd7b1e0ccfd34ba4546f258b460cb4b7e2f06a1bcc96ef88039
         imagePullPolicy: IfNotPresent
         command:
         - cilium-operator-generic
         args:
         - --config-dir=/tmp/cilium/config-map
         - --debug=$(CILIUM_DEBUG)
@@ -56,12 +58,17 @@

               name: cilium-config
               optional: true
         - name: KUBERNETES_SERVICE_HOST
           value: localhost
         - name: KUBERNETES_SERVICE_PORT
           value: '7745'
+        ports:
+        - name: prometheus
+          containerPort: 9963
+          hostPort: 9963
+          protocol: TCP
         livenessProbe:
           httpGet:
             host: 127.0.0.1
             path: /healthz
             port: 9234
             scheme: HTTP
@@ -83,13 +90,12 @@

           mountPath: /tmp/cilium/config-map
           readOnly: true
         terminationMessagePolicy: FallbackToLogsOnError
       hostNetwork: true
       restartPolicy: Always
       priorityClassName: system-cluster-critical
-      serviceAccount: cilium-operator
       serviceAccountName: cilium-operator
       automountServiceAccountToken: true
       affinity:
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
--- HelmRelease: kube-system/cilium Deployment: kube-system/hubble-relay

+++ HelmRelease: kube-system/cilium Deployment: kube-system/hubble-relay

@@ -33,38 +33,48 @@

           capabilities:
             drop:
             - ALL
           runAsGroup: 65532
           runAsNonRoot: true
           runAsUser: 65532
-        image: quay.io/cilium/hubble-relay:v1.14.0-snapshot.4
+        image: quay.io/cilium/hubble-relay:v1.16.5@sha256:6cfae1d1afa566ba941f03d4d7e141feddd05260e5cd0a1509aba1890a45ef00
         imagePullPolicy: IfNotPresent
         command:
         - hubble-relay
         args:
         - serve
         ports:
         - name: grpc
           containerPort: 4245
         readinessProbe:
-          tcpSocket:
-            port: grpc
+          grpc:
+            port: 4222
+          timeoutSeconds: 3
         livenessProbe:
-          tcpSocket:
-            port: grpc
+          grpc:
+            port: 4222
+          timeoutSeconds: 10
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          failureThreshold: 12
+        startupProbe:
+          grpc:
+            port: 4222
+          initialDelaySeconds: 10
+          failureThreshold: 20
+          periodSeconds: 3
         volumeMounts:
         - name: config
           mountPath: /etc/hubble-relay
           readOnly: true
         - name: tls
           mountPath: /var/lib/hubble-relay/tls
           readOnly: true
         terminationMessagePolicy: FallbackToLogsOnError
       restartPolicy: Always
       priorityClassName: null
-      serviceAccount: hubble-relay
       serviceAccountName: hubble-relay
       automountServiceAccountToken: false
       terminationGracePeriodSeconds: 1
       affinity:
         podAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
--- HelmRelease: kube-system/cilium Deployment: kube-system/hubble-ui

+++ HelmRelease: kube-system/cilium Deployment: kube-system/hubble-ui

@@ -22,32 +22,43 @@

       annotations: null
       labels:
         k8s-app: hubble-ui
         app.kubernetes.io/name: hubble-ui
         app.kubernetes.io/part-of: cilium
     spec:
+      securityContext:
+        fsGroup: 1001
+        runAsGroup: 1001
+        runAsUser: 1001
       priorityClassName: null
-      serviceAccount: hubble-ui
       serviceAccountName: hubble-ui
       automountServiceAccountToken: true
       containers:
       - name: frontend
-        image: quay.io/cilium/hubble-ui:v0.11.0@sha256:bcb369c47cada2d4257d63d3749f7f87c91dde32e010b223597306de95d1ecc8
+        image: quay.io/cilium/hubble-ui:v0.13.1@sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6
         imagePullPolicy: IfNotPresent
         ports:
         - name: http
           containerPort: 8081
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8081
+        readinessProbe:
+          httpGet:
+            path: /
+            port: 8081
         volumeMounts:
         - name: hubble-ui-nginx-conf
           mountPath: /etc/nginx/conf.d/default.conf
           subPath: nginx.conf
         - name: tmp-dir
           mountPath: /tmp
         terminationMessagePolicy: FallbackToLogsOnError
       - name: backend
-        image: quay.io/cilium/hubble-ui-backend:v0.11.0@sha256:14c04d11f78da5c363f88592abae8d2ecee3cbe009f443ef11df6ac5f692d839
+        image: quay.io/cilium/hubble-ui-backend:v0.13.1@sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b
         imagePullPolicy: IfNotPresent
         env:
         - name: EVENTS_SERVER_PORT
           value: '8090'
         - name: FLOWS_API_ADDR
           value: hubble-relay:80


--- kubernetes/kube-system Kustomization: flux-system/1-kube-system HelmRelease: kube-system/cilium

+++ kubernetes/kube-system Kustomization: flux-system/1-kube-system HelmRelease: kube-system/cilium

@@ -11,31 +11,33 @@

   chart:
     spec:
       chart: cilium
       sourceRef:
         kind: HelmRepository
         name: cilium
-      version: 1.14.0-snapshot.4
+      version: 1.16.5
   interval: 1h
   maxHistory: 1
   values:
     cgroup:
       autoMount:
         enabled: false
       hostRoot: /sys/fs/cgroup
+    envoy:
+      enabled: false
     hubble:
       enabled: true
       relay:
         enabled: true
       ui:
         enabled: true
     ipam:
       mode: kubernetes
     k8sServiceHost: localhost
     k8sServicePort: 7745
-    kubeProxyReplacement: strict
+    kubeProxyReplacement: true
     securityContext:
       capabilities:
         ciliumAgent:
         - CHOWN
         - KILL
         - NET_ADMIN
@@ -48,7 +50,12 @@

         - SETGID
         - SETUID
         cleanCiliumState:
         - NET_ADMIN
         - SYS_ADMIN
         - SYS_RESOURCE
+    updateStrategy:
+      rollingUpdate:
+        maxSurge: 0
+        maxUnavailable: 1
+      type: RollingUpdate
 
--- kubernetes/kube-system Kustomization: flux-system/1-kube-system CiliumNetworkPolicy: kube-system/cilium-hubble-relay-policy

+++ kubernetes/kube-system Kustomization: flux-system/1-kube-system CiliumNetworkPolicy: kube-system/cilium-hubble-relay-policy

@@ -23,12 +23,23 @@

       - port: '6443'
         protocol: TCP
   endpointSelector:
     matchLabels:
       app.kubernetes.io/name: hubble-relay
 - egress:
+  - toEndpoints:
+    - matchLabels:
+        k8s-app: kube-dns
+        k8s:io.kubernetes.pod.namespace: kube-system
+    toPorts:
+    - ports:
+      - port: '53'
+        protocol: ANY
+      rules:
+        dns:
+        - matchName: hubble-peer.kube-system.svc.cluster.local.
   - toEntities:
     - host
     - remote-node
     toPorts:
     - ports:
       - port: '4244'

@timtorChen timtorChen merged commit 8d6c546 into main Jan 1, 2025
4 checks passed
@timtorChen timtorChen deleted the cilium-1.16 branch January 1, 2025 16:15