[test] Add Cilium BGP LB e2e test #290

Merged: 20 commits, Jan 13, 2025
3 changes: 3 additions & 0 deletions .github/workflows/ci.yml
@@ -156,6 +156,9 @@ jobs:
    - name: Run E2E Tests
      run: devbox run e2e-test

    - name: Run Cilium BGP e2e test
      run: devbox run e2e-test-bgp

    - name: Cleanup Resources
      if: always()
      run: devbox run cleanup-cluster
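The new step runs in the same job as the existing e2e tests, before cleanup, so it reuses the cluster already provisioned earlier in the workflow. Locally, the equivalent is the devbox script added below (a sketch of local reproduction, not part of the diff):

```sh
# Reproduce the CI step locally; assumes the devbox environment and an existing CAPL child cluster
devbox run e2e-test-bgp
```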
11 changes: 11 additions & 0 deletions Makefile
@@ -195,6 +195,17 @@ e2e-test:
	LINODE_TOKEN=$(LINODE_TOKEN) \
	chainsaw test e2e/test --parallel 2

.PHONY: e2e-test-bgp
e2e-test-bgp:
	KUBECONFIG=$(KUBECONFIG_PATH) CLUSTER_SUFFIX=$(CLUSTER_NAME) ./e2e/setup/cilium-setup.sh
	KUBECONFIG=$(KUBECONFIG_PATH) kubectl -n kube-system rollout status daemonset/ccm-linode --timeout=300s
	CLUSTER_NAME=$(CLUSTER_NAME) \
	MGMT_KUBECONFIG=$(MGMT_KUBECONFIG_PATH) \
	KUBECONFIG=$(KUBECONFIG_PATH) \
	REGION=$(LINODE_REGION) \
	LINODE_TOKEN=$(LINODE_TOKEN) \
	chainsaw test e2e/lb-cilium-bgp
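For running the target outside devbox, an invocation could look like the sketch below; every value shown is a placeholder, and passing these as make overrides rather than environment variables is an assumption, not something the Makefile documents:

```sh
# Hypothetical direct invocation of the new target; all values are placeholders
make e2e-test-bgp \
  LINODE_TOKEN=<token> \
  LINODE_REGION=<region> \
  CLUSTER_NAME=<capl-cluster-name> \
  KUBECONFIG_PATH=<path-to-child-cluster-kubeconfig> \
  MGMT_KUBECONFIG_PATH=<path-to-mgmt-cluster-kubeconfig>
```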

#####################################################################
# OS / ARCH
#####################################################################
1 change: 1 addition & 0 deletions devbox.json
@@ -21,6 +21,7 @@
  "scripts": {
    "mgmt-and-capl-cluster": "make mgmt-and-capl-cluster",
    "e2e-test": "make e2e-test",
    "e2e-test-bgp": "make e2e-test-bgp",
    "cleanup-cluster": "make cleanup-cluster"
  }
},
126 changes: 126 additions & 0 deletions e2e/lb-cilium-bgp/chainsaw-test.yaml
@@ -0,0 +1,126 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  name: cilium-bgp-test
spec:
  namespace: "cilium-bgp-test"
  steps:
    - name: Check if CCM is deployed
      try:
        - assert:
            file: ../test/assert-ccm-resources.yaml
    - name: Create a pod and service with load balancer type cilium-bgp
      try:
        - apply:
            file: create-pod-service.yaml
      catch:
        - describe:
            apiVersion: v1
            kind: Pod
        - describe:
            apiVersion: v1
            kind: Service
    - name: Verify CiliumLoadBalancerIPPool creation
      try:
        - assert:
            resource:
              apiVersion: cilium.io/v2alpha1
              kind: CiliumLoadBalancerIPPool
              metadata:
                name: cilium-bgp-test-test-bgp-svc-pool
              spec:
                disabled: false
    - name: Verify CiliumBGPPeeringPolicy
      try:
        - assert:
            resource:
              apiVersion: cilium.io/v2alpha1
              kind: CiliumBGPPeeringPolicy
              metadata:
                name: linode-ccm-bgp-peering
              spec:
                nodeSelector:
                  matchLabels:
                    cilium-bgp-peering: "true"
    - name: Check LoadBalancer IP assignment
      try:
        - assert:
            resource:
              apiVersion: v1
              kind: Service
              metadata:
                name: test-bgp-svc
              status:
                conditions:
                  - status: "True"
                    type: cilium.io/IPAMRequestSatisfied
    - name: Verify IP sharing on labeled nodes
      try:
        - script:
            content: |
              set -e

              # Get the LoadBalancer IP
              LB_IP=$(kubectl get svc test-bgp-svc -n cilium-bgp-test -o jsonpath='{.status.loadBalancer.ingress[0].ip}')

              # Get nodes with the BGP label
              BGP_NODES=$(kubectl get nodes -l cilium-bgp-peering=true -o name)

              if [ -z "$BGP_NODES" ]; then
                echo "No nodes found with label cilium-bgp-peering=true"
                exit 1
              fi

              # Check if the IP is shared on each BGP node
              for node in $BGP_NODES; do
                NODE_ID=$(kubectl get $node -o jsonpath='{.spec.providerID}' | sed 's|linode://||')
                echo "Node ID: $NODE_ID"

                # Get node IPs with error handling
                NODE_IP_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \
                  "https://api.linode.com/v4/linode/instances/$NODE_ID/ips")

                # Check if shared IPs exist; if not, fall back to an empty list
                SHARED_IPS=$(echo "$NODE_IP_RESPONSE" | jq -r '.ipv4.shared[]?.address // empty')
                echo "shared IPs: $SHARED_IPS"

                if [ -n "$SHARED_IPS" ] && ! echo "$SHARED_IPS" | grep -q "$LB_IP"; then
                  echo "LoadBalancer IP $LB_IP not found in shared IPs of node $node"
                  exit 1
                fi
              done

              # Check if the nanode (IP holder) has the shared IP
              NANODE_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \
                "https://api.linode.com/v4/linode/instances")

              NANODE_ID=$(echo "$NANODE_RESPONSE" | \
                jq -r --arg cluster "$CLUSTER_NAME" '.data[] | select(.label | endswith($cluster)) | .id')

              if [ -z "$NANODE_ID" ]; then
                echo "No nanode found for cluster $CLUSTER_NAME"
                exit 0
              fi

              NANODE_IP_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \
                "https://api.linode.com/v4/linode/instances/$NANODE_ID/ips")

              NANODE_IPS=$(echo "$NANODE_IP_RESPONSE" | jq -r '.ipv4.public[]?.address // empty')

              if [ -n "$NANODE_IPS" ] && ! echo "$NANODE_IPS" | grep -q "$LB_IP"; then
                echo "LoadBalancer IP not found in nanode IPs"
                exit 1
              fi

              echo "Successfully found LoadBalancer IP in nanode IPs"

              # Delete the nanode
              curl -s -X DELETE -H "Authorization: Bearer $LINODE_TOKEN" \
                "https://api.linode.com/v4/linode/instances/$NANODE_ID"
            check:
              ($error == null): true
              (contains($stdout, 'LoadBalancer IP not found in shared IPs of node')): false
              (contains($stdout, 'LoadBalancer IP not found in nanode IPs')): false
              (contains($stdout, 'Successfully found LoadBalancer IP in nanode IPs')): true
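If that last step fails in CI, the same objects the assertions cover can be inspected by hand; a sketch using the names this test expects the CCM to create:

```sh
# Inspect the resources the assertions above check (namespace and names as used in this test)
kubectl -n cilium-bgp-test get svc test-bgp-svc -o wide
kubectl get ciliumloadbalancerippools.cilium.io cilium-bgp-test-test-bgp-svc-pool -o yaml
kubectl get ciliumbgppeeringpolicies.cilium.io linode-ccm-bgp-peering -o yaml
```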

24 changes: 24 additions & 0 deletions e2e/lb-cilium-bgp/create-pod-service.yaml
@@ -0,0 +1,24 @@
apiVersion: v1
kind: Pod
metadata:
  name: test-pod-1
  labels:
    app: test-bgp
spec:
  containers:
    - name: nginx
      image: nginx:latest
      ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: test-bgp-svc
spec:
  type: LoadBalancer
  ports:
    - port: 80
      targetPort: 80
  selector:
    app: test-bgp
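Once the CCM reconciles this Service in cilium-bgp load balancer mode it should receive an ingress IP; a quick manual check (a sketch, reusing the namespace the chainsaw test creates):

```sh
# Print the address assigned to the test Service after the CCM has reconciled it
kubectl -n cilium-bgp-test get svc test-bgp-svc \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```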
32 changes: 32 additions & 0 deletions e2e/setup/cilium-setup.sh
@@ -0,0 +1,32 @@
#!/bin/bash
set -euo pipefail

# Add the BGP peering label to non-control-plane nodes. Needed so the shared IP can be updated on those nodes.
kubectl get nodes --no-headers | grep -v control-plane |\
  awk '{print $1}' | xargs -I {} kubectl label nodes {} cilium-bgp-peering=true --overwrite

# Add RBAC permissions so the CCM can manage Cilium BGP resources
kubectl patch clusterrole ccm-linode-clusterrole --type='json' -p='[{
  "op": "add",
  "path": "/rules/-",
  "value": {
    "apiGroups": ["cilium.io"],
    "resources": ["ciliumloadbalancerippools", "ciliumbgppeeringpolicies"],
    "verbs": ["get", "list", "watch", "create", "update", "patch", "delete"]
  }
}]'

# Patch the ccm-linode DaemonSet to enable the cilium-bgp load balancer type
kubectl patch daemonset ccm-linode -n kube-system --type='json' -p='[{
  "op": "add",
  "path": "/spec/template/spec/containers/0/args/-",
  "value": "--bgp-node-selector=cilium-bgp-peering=true"
}, {
  "op": "add",
  "path": "/spec/template/spec/containers/0/args/-",
  "value": "--load-balancer-type=cilium-bgp"
}, {
  "op": "add",
  "path": "/spec/template/spec/containers/0/args/-",
  "value": "--ip-holder-suffix='"${CLUSTER_SUFFIX}"'"
}]'
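A small follow-up check that the script took effect could look like the sketch below (not part of the PR; it only reuses the label, DaemonSet name, and rollout command already present in this change):

```sh
# Nodes that should participate in BGP peering
kubectl get nodes -l cilium-bgp-peering=true

# Confirm the new args landed on the CCM DaemonSet and the rollout finished
kubectl -n kube-system get daemonset ccm-linode \
  -o jsonpath='{.spec.template.spec.containers[0].args}'
kubectl -n kube-system rollout status daemonset/ccm-linode --timeout=300s
```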