From d3afae60b5a15b7591ce0a1a698b16f6b370da52 Mon Sep 17 00:00:00 2001
From: Jason Rokeach <1076569+jrokeach@users.noreply.github.com>
Date: Thu, 12 Oct 2023 10:01:51 -0400
Subject: [PATCH] Convert 2regions-hrr example to use load balancers for one region rather than static NodePorts (#20)

* Load balancer for Lothlorien region

* Documentation update for MetalLB HRRs
---
 examples/2regions-hrr/README.md              | 65 ++++++++++++-----
 examples/2regions-hrr/bgppeer-global.yml     | 10 +++
 examples/2regions-hrr/meshrr-lothlorien.yaml | 73 +++++++++++++++++++-
 3 files changed, 128 insertions(+), 20 deletions(-)
 create mode 100644 examples/2regions-hrr/bgppeer-global.yml

diff --git a/examples/2regions-hrr/README.md b/examples/2regions-hrr/README.md
index a085a1d..d6baca2 100644
--- a/examples/2regions-hrr/README.md
+++ b/examples/2regions-hrr/README.md
@@ -9,16 +9,24 @@
 * **Redundancy groups and anycast addressing:**
   * Each node is assigned to redundancy group `a` or `b`.
   * For each region with neighbors outside the cluster, separate DaemonSets are created for `a` and `b`, each with a unique IP address for that [meshrr_region:redundancy_group] combination. This IP address is used for iBGP peering with neighbors outside the cluster.
-  * Each /32 is assigned to the loopback interface of *every* node in the [meshrr_region:redundancy_group] combination, and the routers connecting to the node have a static route to the /32 that is redistributed into the IGP. For example, in the above topology:
-
-    | Node Region | MESHRR_REGION(s)           | Redundancy Group | Loopback Address(es)   |
-    | ----------- | -------------------------- | ---------------- | ---------------------- |
-    | Mirkwood    | mirkwood                   | a                | 172.19.1.1             |
-    | Mirkwood    | mirkwood                   | b                | 172.19.1.2             |
-    | Core        | core, mirkwood, lothlorien | a                | 172.19.1.1, 172.19.2.1 |
-    | Core        | core, mirkwood, lothlorien | b                | 172.19.1.2, 172.19.2.2 |
-    | Lothlorien  | lothlorien                 | a                | 172.19.2.1             |
-    | Lothlorien  | lothlorien                 | b                | 172.19.2.2             |
+  * For Lothlorien:
+    * Kubernetes nodes run MetalLB.
+    * MetalLB peers via eBGP to each connected router; every router uses the same anycast loopback address (10.0.0.0) as its peering address.
+    * meshrr pods are exposed through a Kubernetes `LoadBalancer` service implemented by MetalLB. The service is configured with `externalTrafficPolicy: Local`, so MetalLB announces the service's /32 only from nodes that host a matching meshrr pod; those pods act as route reflectors for routers outside the cluster.
+    * This is the preferred method of providing reachability to the route reflectors, because the service address is advertised dynamically based upon where route reflectors actually exist.
+  * For Mirkwood:
+    * Each /32 is assigned to the loopback interface of *every* node in the [meshrr_region:redundancy_group] combination.
+    * Routers connecting to the node have a static route to the /32 that is redistributed into the IGP.
+    * This method is discouraged but is included to illustrate an alternative. Because it relies on static routes on the connected router, it provides no assurance that a route reflector is operational before its address is advertised. It could be supplemented with event scripts or similar, but the Lothlorien method achieves this natively through dynamic routing.
+  * In the above topology:
+    | Node Region | MESHRR_REGION(s)           | Redundancy Group | Loopback Address(es) | MetalLB IP(s) |
+    | ----------- | -------------------------- | ---------------- | -------------------- | ------------- |
+    | Lothlorien  | lothlorien                 | a                |                      | 172.19.1.1    |
+    | Lothlorien  | lothlorien                 | b                |                      | 172.19.1.2    |
+    | Core        | core, mirkwood, lothlorien | a                | 172.19.2.1           | 172.19.1.1    |
+    | Core        | core, mirkwood, lothlorien | b                | 172.19.2.2           | 172.19.1.2    |
+    | Mirkwood    | mirkwood                   | a                | 172.19.2.1           |               |
+    | Mirkwood    | mirkwood                   | b                | 172.19.2.2           |               |
 
 ### Usage
 1. Follow the instructions in [Quickstart](../../README.md#Quickstart) using the example YAML files in [examples/2regions-hrr](.).
@@ -29,21 +37,44 @@ sudo ip address add 172.19.1.1 dev lo
 ```
 
-3. Configure your routers servicing the nodes with static routes redistributed into your IGP for the node loopback addresses.
+3. Configure the Lothlorien routers connected to the nodes with:
+   * The anycast peering address (10.0.0.0/32) on the loopback for MetalLB to peer with
+   * A hardcoded router ID, so that the anycast peering address does not become the router ID (not shown in the example below)
+   * BGP peering
+   * Redistribution of RR routes from BGP to IGP
+   ```junos
+   set interfaces lo0 unit 0 family inet address 10.0.0.0/32
+   set protocols bgp group RR-LOADBALANCER type external
+   set protocols bgp group RR-LOADBALANCER multihop ttl 2
+   set protocols bgp group RR-LOADBALANCER local-address 10.0.0.0
+   set protocols bgp group RR-LOADBALANCER peer-as 65001
+   set protocols bgp group RR-LOADBALANCER allow 172.19.0.0/24
+   set protocols bgp group RR-LOADBALANCER import NO-ADVERTISE
+   set protocols isis export FILTER-RRLBPEER
+   set protocols isis export REDISTRIBUTE-RRS
+   set policy-options policy-statement FILTER-RRLBPEER from protocol direct route-filter 10.0.0.0/32 exact
+   set policy-options policy-statement FILTER-RRLBPEER then reject
+   set policy-options community no-advertise members no-advertise
+   set policy-options policy-statement NO-ADVERTISE then community add no-advertise
+   set policy-options policy-statement REDISTRIBUTE-RRS from protocol bgp route-filter 172.19.1.0/24 prefix-length-range /32-/32
+   set policy-options policy-statement REDISTRIBUTE-RRS then accept
+   ```
+
+4. Configure the Mirkwood routers connected to the nodes with static routes redistributed into your IGP for the node loopback addresses.
 Configuration on the router servicing a Mirkwood A node may look like:
 
 ##### Junos
 
 ```junos
 routing-options {
     static {
-        route 172.0.1.1/32 next-hop ;
+        route 172.19.2.1/32 next-hop ;
     }
 }
 policy-options {
     policy-statement RRSTATIC-TO-ISIS {
         from {
             protocol static;
-            route-filter 172.19.1.1/32 exact;
+            route-filter 172.19.2.1/32 exact;
         }
         then accept;
     }
@@ -59,10 +90,10 @@ Configuration on the router servicing a Mirkwood A node may look like:
 ```iox-xr
 router static
  address-family ipv4 unicast
-  172.0.1.1/32
+  172.19.2.1/32
 !
 route-policy STATIC-TO-ISIS
-  if destination in (172.19.1.1/32) then
+  if destination in (172.19.2.1/32) then
     pass
   endif
 end-policy
@@ -72,7 +103,7 @@ Configuration on the router servicing a Mirkwood A node may look like:
 !
 ```
 
-4. Modify configuration templates as necessary. [`meshrr/juniper.conf.j2`](../../meshrr/juniper.conf.j2) will be loaded to all instances by default, but customizations on a per-deployment/per-daemonset basis should be performed on other J2 files (see [`mirkwood-config.j2`](templates/mirkwood-config.j2) and [`lothlorien-config.j2`](templates/lothlorien-config.j2).
+5. Modify configuration templates as necessary. [`meshrr/juniper.conf.j2`](../../meshrr/juniper.conf.j2) will be loaded to all instances by default, but customizations on a per-deployment/per-daemonset basis should be performed on other J2 files (see [`mirkwood-config.j2`](templates/mirkwood-config.j2) and [`lothlorien-config.j2`](templates/lothlorien-config.j2)).
 
 Apply these configuration templates as ConfigSets for any cases that require customization as so:
 ```bash
@@ -91,4 +122,4 @@
 
 These ConfigMaps are mounted as volumes in the corresponding DaemonSets.
 
-5. Modify the YAML files to your needs. At the least, `` will need to be replaced to reference your private registry. Load the YAML files for the DaemonSets and Services into Kubernetes as per [Quickstart](../../README.md#Quickstart).
\ No newline at end of file
+6. Modify the YAML files to your needs. At the least, `` will need to be replaced to reference your private registry. Load the YAML files for the DaemonSets and Services into Kubernetes as per [Quickstart](../../README.md#Quickstart).
\ No newline at end of file
diff --git a/examples/2regions-hrr/bgppeer-global.yml b/examples/2regions-hrr/bgppeer-global.yml
new file mode 100644
index 0000000..aaf2ea2
--- /dev/null
+++ b/examples/2regions-hrr/bgppeer-global.yml
@@ -0,0 +1,10 @@
+apiVersion: metallb.io/v1beta2
+kind: BGPPeer
+metadata:
+  name: asn65000-global-lo1
+  namespace: metallb
+spec:
+  myASN: 65001
+  peerASN: 65000
+  peerAddress: 10.0.0.0
+  ebgpMultiHop: True
diff --git a/examples/2regions-hrr/meshrr-lothlorien.yaml b/examples/2regions-hrr/meshrr-lothlorien.yaml
index 0c1789a..5dda168 100644
--- a/examples/2regions-hrr/meshrr-lothlorien.yaml
+++ b/examples/2regions-hrr/meshrr-lothlorien.yaml
@@ -1,4 +1,74 @@
 ---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+  name: meshrr-lothlorien
+  namespace: metallb
+spec:
+  addresses:
+  - 172.19.1.1/32
+  - 172.19.1.2/32
+  autoAssign: false
+---
+apiVersion: metallb.io/v1beta1
+kind: BGPAdvertisement
+metadata:
+  name: meshrr-lothlorien
+  namespace: metallb
+spec:
+  ipAddressPools:
+  - meshrr-lothlorien
+---
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  name: meshrr-lothlorien-a
+  selfLink: /api/v1/namespaces/default/services/meshrr-lothlorien-a
+  annotations:
+    metallb.universe.tf/address-pool: meshrr-lothlorien
+spec:
+  ports:
+  - name: bgp
+    port: 179
+    protocol: TCP
+    targetPort: bgp
+  selector:
+    app: meshrr
+    meshrr_region_lothlorien: "true"
+    redundancy_group: a
+  sessionAffinity: None
+  type: LoadBalancer
+  loadBalancerIP: 172.19.1.1
+  externalTrafficPolicy: Local
+status:
+  loadBalancer: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  name: meshrr-lothlorien-b
+  selfLink: /api/v1/namespaces/default/services/meshrr-lothlorien-b
+  annotations:
+    metallb.universe.tf/address-pool: meshrr-lothlorien
+spec:
+  ports:
+  - name: bgp
+    port: 179
+    protocol: TCP
+    targetPort: bgp
+  selector:
+    app: meshrr
+    meshrr_region_lothlorien: "true"
+    redundancy_group: b
+  sessionAffinity: None
+  type: LoadBalancer
+  loadBalancerIP: 172.19.1.2
+  externalTrafficPolicy: Local
+status:
+  loadBalancer: {}
+---
 apiVersion: v1
 kind: Service
 metadata:
@@ -19,7 +89,6 @@ spec:
   type: ClusterIP
 status:
   loadBalancer: {}
-
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -90,7 +159,6 @@ spec:
           containerPort: 179
           protocol: TCP
           hostIP: 172.19.1.1
-          hostPort: 179
         env:
         - name: POD_IP
           valueFrom:
@@ -205,7 +273,6 @@ spec:
          containerPort: 179
          protocol: TCP
          hostIP: 172.19.1.2
-         hostPort: 179
        env:
        - name: POD_IP
          valueFrom: