From b727b179e1c2fd5c451a3581d88f16c39447ee08 Mon Sep 17 00:00:00 2001 From: donghaobo Date: Fri, 4 Sep 2020 12:07:25 +0000 Subject: [PATCH 01/35] icmp traceroute --- src/ipv4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ipv4.c b/src/ipv4.c index 83b5158b4..113b041f7 100644 --- a/src/ipv4.c +++ b/src/ipv4.c @@ -533,7 +533,7 @@ int ipv4_xmit(struct rte_mbuf *mbuf, const struct flow4 *fl4) struct route_entry *rt; struct ipv4_hdr *iph; - if (!mbuf || !fl4 || fl4->fl4_saddr.s_addr == htonl(INADDR_ANY)) { + if (!mbuf || !fl4) { if (mbuf) rte_pktmbuf_free(mbuf); return EDPVS_INVAL; From 4eee7b63f4c5ae81bdddfb253c24ac017ed2b988 Mon Sep 17 00:00:00 2001 From: ytwang0320 Date: Tue, 1 Dec 2020 20:04:33 +0800 Subject: [PATCH 02/35] added whitelist --- README.md | 2 +- include/conf/conn.h | 2 +- include/conf/sockopts.h | 6 + include/conf/whtlst.h | 47 ++ include/ctrl.h | 4 +- include/ipvs/whtlst.h | 41 ++ src/ipvs/ip_vs_core.c | 13 + src/ipvs/ip_vs_proto_tcp.c | 6 + src/ipvs/ip_vs_proto_udp.c | 7 + src/ipvs/ip_vs_service.c | 3 + src/ipvs/ip_vs_synproxy.c | 6 + src/ipvs/ip_vs_whtlst.c | 459 ++++++++++++++++++ tools/ipvsadm/ipvsadm.c | 186 ++++++- .../keepalived/keepalived/check/check_data.c | 75 +++ .../keepalived/check/check_parser.c | 17 + .../keepalived/keepalived/check/ipvswrapper.c | 324 +++++++++++-- tools/keepalived/keepalived/check/ipwrapper.c | 76 ++- tools/keepalived/keepalived/check/libipvs.c | 78 ++- .../keepalived/include/check_data.h | 17 + tools/keepalived/keepalived/include/dp_vs.h | 1 + tools/keepalived/keepalived/include/ip_vs.h | 10 +- .../keepalived/include/ipvswrapper.h | 2 + .../keepalived/keepalived/include/ipwrapper.h | 2 + tools/keepalived/keepalived/include/libipvs.h | 11 +- 24 files changed, 1325 insertions(+), 70 deletions(-) create mode 100644 include/conf/whtlst.h create mode 100644 include/ipvs/whtlst.h create mode 100644 src/ipvs/ip_vs_whtlst.c mode change 100644 => 100755 tools/keepalived/keepalived/check/ipvswrapper.c 
mode change 100644 => 100755 tools/keepalived/keepalived/check/ipwrapper.c diff --git a/README.md b/README.md index 9656ac1f9..c1dc72b1f 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Major features of `DPVS` including: * User-space *Lite IP stack* (IPv4, Routing, ARP, ICMP ...). * *SNAT* mode for Internet access from internal network. * Support *KNI*, *VLAN*, *Bonding* for different IDC environment. -* Security aspect, support *TCP syn-proxy*, *Conn-Limit*, *black-list*. +* Security aspect, support *TCP syn-proxy*, *Conn-Limit*, *black-list*, *white-list*. * QoS: *Traffic Control*. `DPVS` feature modules are illustrated as following picture. diff --git a/include/conf/conn.h b/include/conf/conn.h index 6a58d53ae..0f48d411e 100644 --- a/include/conf/conn.h +++ b/include/conf/conn.h @@ -84,4 +84,4 @@ struct ip_vs_conn_array { ipvs_conn_entry_t array[0]; } __attribute__((__packed__)); -#endif /* __DPVS_BLKLST_CONF_H__ */ +#endif /* __DPVS_CONN_CONF_H__ */ diff --git a/include/conf/sockopts.h b/include/conf/sockopts.h index 558581ecb..f3544c8ca 100644 --- a/include/conf/sockopts.h +++ b/include/conf/sockopts.h @@ -117,6 +117,12 @@ enum { SOCKOPT_TUNNEL_REPLACE, SOCKOPT_TUNNEL_SHOW = 1200, + /* whtlst */ + SOCKOPT_SET_WHTLST_ADD = 1300, + SOCKOPT_SET_WHTLST_DEL, + SOCKOPT_SET_WHTLST_FLUSH, + SOCKOPT_GET_WHTLST_GETALL = 1300, + /* ipset */ SOCKOPT_SET_IPSET_ADD = 3300, SOCKOPT_SET_IPSET_DEL, diff --git a/include/conf/whtlst.h b/include/conf/whtlst.h new file mode 100644 index 000000000..074b55c72 --- /dev/null +++ b/include/conf/whtlst.h @@ -0,0 +1,47 @@ +/* + * DPVS is a software load balancer (Virtual Server) based on DPDK. + * + * Copyright (C) 2017 iQIYI (www.iqiyi.com). + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +/** + * Note: control plane only + * based on dpvs_sockopt. + */ +#ifndef __DPVS_WHTLST_CONF_H__ +#define __DPVS_WHTLST_CONF_H__ +#include "inet.h" +#include "conf/sockopts.h" +struct dp_vs_whtlst_entry { + union inet_addr addr; +}; + +struct dp_vs_whtlst_conf { + /* identify service */ + int af; + uint8_t proto; + union inet_addr vaddr; + uint16_t vport; + uint32_t fwmark; + + /* for set */ + union inet_addr whtlst; +}; + +struct dp_vs_whtlst_conf_array { + int naddr; + struct dp_vs_whtlst_conf whtlsts[0]; +} __attribute__((__packed__)); + +#endif /* __DPVS_WHTLST_CONF_H__ */ diff --git a/include/ctrl.h b/include/ctrl.h index b34851f40..abd4350fc 100644 --- a/include/ctrl.h +++ b/include/ctrl.h @@ -207,10 +207,12 @@ int msg_dump(const struct dpvs_msg *msg, char *buf, int len); #define MSG_TYPE_IFA_GET 22 #define MSG_TYPE_IFA_SET 23 #define MSG_TYPE_IFA_SYNC 24 +#define MSG_TYPE_WHTLST_ADD 25 +#define MSG_TYPE_WHTLST_DEL 26 /* for svc per_core, refer to service.h*/ enum { - MSG_TYPE_SVC_SET_FLUSH = MSG_TYPE_IFA_SYNC + 1, + MSG_TYPE_SVC_SET_FLUSH = MSG_TYPE_WHTLST_DEL + 1, MSG_TYPE_SVC_SET_ZERO, MSG_TYPE_SVC_SET_ADD, MSG_TYPE_SVC_SET_EDIT, diff --git a/include/ipvs/whtlst.h b/include/ipvs/whtlst.h new file mode 100644 index 000000000..007967ce6 --- /dev/null +++ b/include/ipvs/whtlst.h @@ -0,0 +1,41 @@ +/* + * DPVS is a software load balancer (Virtual Server) based on DPDK. + * + * Copyright (C) 2017 iQIYI (www.iqiyi.com). + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __DPVS_WHTLST_H__ +#define __DPVS_WHTLST_H__ +#include "conf/common.h" +#include "ipvs/service.h" +#include "timer.h" + +struct whtlst_entry { + struct list_head list; + union inet_addr vaddr; + uint16_t vport; + uint8_t proto; + union inet_addr whtlst; +}; + +struct whtlst_entry *dp_vs_whtlst_lookup(uint8_t proto, const union inet_addr *vaddr, + uint16_t vport, const union inet_addr *whtlst); +bool dp_vs_whtlst_allow(uint8_t proto, const union inet_addr *vaddr, + uint16_t vport, const union inet_addr *whtlst); +void dp_vs_whtlst_flush(struct dp_vs_service *svc); + +int dp_vs_whtlst_init(void); +int dp_vs_whtlst_term(void); + +#endif /* __DPVS_WHTLST_H__ */ diff --git a/src/ipvs/ip_vs_core.c b/src/ipvs/ip_vs_core.c index 6e40006e9..2763e553c 100644 --- a/src/ipvs/ip_vs_core.c +++ b/src/ipvs/ip_vs_core.c @@ -34,6 +34,7 @@ #include "ipvs/xmit.h" #include "ipvs/synproxy.h" #include "ipvs/blklst.h" +#include "ipvs/whtlst.h" #include "ipvs/proto_udp.h" #include "route6.h" #include "ipvs/redirect.h" @@ -1211,6 +1212,12 @@ int dp_vs_init(void) goto err_blklst; } + err = dp_vs_whtlst_init(); + if (err != EDPVS_OK) { + RTE_LOG(ERR, IPVS, "fail to init whtlst: %s\n", dpvs_strerror(err)); + goto err_whtlst; + } + err = dp_vs_stats_init(); if (err != EDPVS_OK) { RTE_LOG(ERR, IPVS, "fail to init stats: %s\n", dpvs_strerror(err)); @@ -1231,6 +1238,8 @@ int dp_vs_init(void) err_stats: dp_vs_blklst_term(); err_blklst: + dp_vs_whtlst_term(); +err_whtlst: dp_vs_service_term(); err_serv: dp_vs_sched_term(); @@ -1264,6 +1273,10 @@ int dp_vs_term(void) if (err != EDPVS_OK) RTE_LOG(ERR, IPVS, "fail to terminate blklst: %s\n", dpvs_strerror(err)); + err = dp_vs_whtlst_term(); + if (err != EDPVS_OK) + RTE_LOG(ERR, IPVS, 
"fail to terminate whtlst: %s\n", dpvs_strerror(err)); + err = dp_vs_service_term(); if (err != EDPVS_OK) RTE_LOG(ERR, IPVS, "fail to terminate serv: %s\n", dpvs_strerror(err)); diff --git a/src/ipvs/ip_vs_proto_tcp.c b/src/ipvs/ip_vs_proto_tcp.c index 287a9ac59..ab8c6af21 100644 --- a/src/ipvs/ip_vs_proto_tcp.c +++ b/src/ipvs/ip_vs_proto_tcp.c @@ -31,6 +31,7 @@ #include "ipvs/dest.h" #include "ipvs/synproxy.h" #include "ipvs/blklst.h" +#include "ipvs/whtlst.h" #include "parser/parser.h" /* we need more detailed fields than dpdk tcp_hdr{}, * like tcphdr.syn, so use standard definition. */ @@ -644,6 +645,11 @@ tcp_conn_lookup(struct dp_vs_proto *proto, const struct dp_vs_iphdr *iph, return NULL; } + if (!dp_vs_whtlst_allow(iph->proto, &iph->daddr, th->dest, &iph->saddr)) { + *drop = true; + return NULL; + } + conn = dp_vs_conn_get(iph->af, iph->proto, &iph->saddr, &iph->daddr, th->source, th->dest, direct, reverse); diff --git a/src/ipvs/ip_vs_proto_udp.c b/src/ipvs/ip_vs_proto_udp.c index 10b60a3fa..1f77b53ab 100644 --- a/src/ipvs/ip_vs_proto_udp.c +++ b/src/ipvs/ip_vs_proto_udp.c @@ -29,6 +29,7 @@ #include "ipvs/conn.h" #include "ipvs/service.h" #include "ipvs/blklst.h" +#include "ipvs/whtlst.h" #include "ipvs/redirect.h" #include "parser/parser.h" #include "uoa.h" @@ -212,6 +213,12 @@ udp_conn_lookup(struct dp_vs_proto *proto, return NULL; } + if (!dp_vs_whtlst_allow(iph->proto, &iph->daddr, uh->dst_port, + &iph->saddr)) { + *drop = true; + return NULL; + } + conn = dp_vs_conn_get(iph->af, iph->proto, &iph->saddr, &iph->daddr, uh->src_port, uh->dst_port, diff --git a/src/ipvs/ip_vs_service.c b/src/ipvs/ip_vs_service.c index 41f03404f..2dd6a14d3 100644 --- a/src/ipvs/ip_vs_service.c +++ b/src/ipvs/ip_vs_service.c @@ -29,6 +29,7 @@ #include "ipvs/sched.h" #include "ipvs/laddr.h" #include "ipvs/blklst.h" +#include "ipvs/whtlst.h" #include "ctrl.h" #include "route.h" #include "route6.h" @@ -639,6 +640,8 @@ static void __dp_vs_service_del(struct dp_vs_service *svc) 
dp_vs_blklst_flush(svc); + dp_vs_whtlst_flush(svc); + /* * Unlink the whole destination list */ diff --git a/src/ipvs/ip_vs_synproxy.c b/src/ipvs/ip_vs_synproxy.c index 03e7c88bb..fceb2b262 100644 --- a/src/ipvs/ip_vs_synproxy.c +++ b/src/ipvs/ip_vs_synproxy.c @@ -29,6 +29,7 @@ #include "ipvs/proto.h" #include "ipvs/proto_tcp.h" #include "ipvs/blklst.h" +#include "ipvs/whtlst.h" #include "parser/parser.h" /* synproxy controll variables */ @@ -704,6 +705,11 @@ int dp_vs_synproxy_syn_rcv(int af, struct rte_mbuf *mbuf, th->dest, &iph->saddr)) { goto syn_rcv_out; } + + /* drop packet if not in whitelist */ + if (!dp_vs_whtlst_allow(iph->proto, &iph->daddr, th->dest, &iph->saddr)) { + goto syn_rcv_out; + } } else { return 1; } diff --git a/src/ipvs/ip_vs_whtlst.c b/src/ipvs/ip_vs_whtlst.c new file mode 100644 index 000000000..5a2209ef8 --- /dev/null +++ b/src/ipvs/ip_vs_whtlst.c @@ -0,0 +1,459 @@ +/* + * DPVS is a software load balancer (Virtual Server) based on DPDK. + * + * Copyright (C) 2017 iQIYI (www.iqiyi.com). + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include "dpdk.h" +#include "list.h" +#include "conf/common.h" +#include "netif.h" +#include "inet.h" +#include "ctrl.h" +#include "ipvs/ipvs.h" +#include "ipvs/service.h" +#include "ipvs/whtlst.h" +#include "conf/whtlst.h" + +/** + * * per-lcore config for whtlst ip + * */ + + +#define DPVS_WHTLST_TAB_BITS 16 +#define DPVS_WHTLST_TAB_SIZE (1 << DPVS_WHTLST_TAB_BITS) +#define DPVS_WHTLST_TAB_MASK (DPVS_WHTLST_TAB_SIZE - 1) + +#define this_whtlst_tab (RTE_PER_LCORE(dp_vs_whtlst_tab)) +#define this_num_whtlsts (RTE_PER_LCORE(num_whtlsts)) + +static RTE_DEFINE_PER_LCORE(struct list_head *, dp_vs_whtlst_tab); +static RTE_DEFINE_PER_LCORE(rte_atomic32_t, num_whtlsts); + +static uint32_t dp_vs_whtlst_rnd; + +static inline uint32_t whtlst_hashkey(const uint8_t proto, const union inet_addr *vaddr, const uint16_t vport) +{ + /* jhash hurts performance, we do not use rte_jhash_2words here */ + return (((rte_be_to_cpu_16(proto) * 7 + + rte_be_to_cpu_32(vaddr->in.s_addr)) * 31 + + rte_be_to_cpu_16(vport)) * 15 + + dp_vs_whtlst_rnd) & DPVS_WHTLST_TAB_MASK; +} + +struct whtlst_entry *dp_vs_whtlst_lookup(uint8_t proto, const union inet_addr *vaddr, + uint16_t vport, const union inet_addr *whtlst) +{ + unsigned hashkey; + struct whtlst_entry *whtlst_node; + + hashkey = whtlst_hashkey(proto, vaddr, vport); + list_for_each_entry(whtlst_node, &this_whtlst_tab[hashkey], list){ + if (whtlst_node->vaddr.in.s_addr == vaddr->in.s_addr && + whtlst_node->whtlst.in.s_addr == whtlst->in.s_addr && + whtlst_node->proto == proto && + whtlst_node->vport == vport) + return whtlst_node; + } + return NULL; +} + +bool dp_vs_whtlst_allow(uint8_t proto, const union inet_addr *vaddr, + uint16_t vport, const union inet_addr *whtlst) +{ + unsigned hashkey; + struct whtlst_entry *whtlst_node; + + hashkey = whtlst_hashkey(proto, vaddr, vport); + + if (&this_whtlst_tab[hashkey] == NULL || list_empty(&this_whtlst_tab[hashkey])) { + return true; + } + + 
list_for_each_entry(whtlst_node, &this_whtlst_tab[hashkey], list){ + if (whtlst_node->vaddr.in.s_addr == vaddr->in.s_addr && + whtlst_node->whtlst.in.s_addr == whtlst->in.s_addr && + whtlst_node->proto == proto && + whtlst_node->vport == vport) + return true; + } + + return false; +} + +static int dp_vs_whtlst_add_lcore(uint8_t proto, const union inet_addr *vaddr, + uint16_t vport, const union inet_addr *whtlst) +{ + unsigned hashkey; + struct whtlst_entry *new, *whtlst_node; + whtlst_node = dp_vs_whtlst_lookup(proto, vaddr, vport, whtlst); + if (whtlst_node) { + return EDPVS_EXIST; + } + + hashkey = whtlst_hashkey(proto, vaddr, vport); + + new = rte_zmalloc("new_whtlst_entry", sizeof(struct whtlst_entry), 0); + if (new == NULL) + return EDPVS_NOMEM; + + memcpy(&new->vaddr, vaddr,sizeof(union inet_addr)); + new->vport = vport; + new->proto = proto; + memcpy(&new->whtlst, whtlst,sizeof(union inet_addr)); + list_add(&new->list, &this_whtlst_tab[hashkey]); + rte_atomic32_inc(&this_num_whtlsts); + + return EDPVS_OK; +} + +static int dp_vs_whtlst_del_lcore(uint8_t proto, const union inet_addr *vaddr, + uint16_t vport, const union inet_addr *whtlst) +{ + struct whtlst_entry *whtlst_node; + + whtlst_node = dp_vs_whtlst_lookup(proto, vaddr, vport, whtlst); + if (whtlst_node != NULL) { + list_del(&whtlst_node->list); + rte_free(whtlst_node); + rte_atomic32_dec(&this_num_whtlsts); + return EDPVS_OK; + } + return EDPVS_NOTEXIST; +} + +static int dp_vs_whtlst_add(uint8_t proto, const union inet_addr *vaddr, + uint16_t vport, const union inet_addr *whtlst) +{ + lcoreid_t cid = rte_lcore_id(); + int err; + struct dpvs_msg *msg; + struct dp_vs_whtlst_conf cf; + + if (cid != rte_get_master_lcore()) { + RTE_LOG(INFO, SERVICE, "[%s] must set from master lcore\n", __func__); + return EDPVS_NOTSUPP; + } + + memset(&cf, 0, sizeof(struct dp_vs_whtlst_conf)); + memcpy(&(cf.vaddr), vaddr,sizeof(union inet_addr)); + memcpy(&(cf.whtlst), whtlst, sizeof(union inet_addr)); + cf.vport = vport; 
+ cf.proto = proto; + + /*set whtlst ip on master lcore*/ + err = dp_vs_whtlst_add_lcore(proto, vaddr, vport, whtlst); + if (err) { + RTE_LOG(INFO, SERVICE, "[%s] fail to set whtlst ip\n", __func__); + return err; + } + + /*set whtlst ip on all slave lcores*/ + msg = msg_make(MSG_TYPE_WHTLST_ADD, 0, DPVS_MSG_MULTICAST, + cid, sizeof(struct dp_vs_whtlst_conf), &cf); + if (!msg) + return EDPVS_NOMEM; + err = multicast_msg_send(msg, 0, NULL); + if (err != EDPVS_OK) { + msg_destroy(&msg); + RTE_LOG(INFO, SERVICE, "[%s] fail to send multicast message\n", __func__); + return err; + } + msg_destroy(&msg); + + return EDPVS_OK; +} + +static int dp_vs_whtlst_del(uint8_t proto, const union inet_addr *vaddr, + uint16_t vport, const union inet_addr *whtlst) +{ + lcoreid_t cid = rte_lcore_id(); + int err; + struct dpvs_msg *msg; + struct dp_vs_whtlst_conf cf; + + if (cid != rte_get_master_lcore()) { + RTE_LOG(INFO, SERVICE, "[%s] must set from master lcore\n", __func__); + return EDPVS_NOTSUPP; + } + + memset(&cf, 0, sizeof(struct dp_vs_whtlst_conf)); + memcpy(&(cf.vaddr), vaddr,sizeof(union inet_addr)); + memcpy(&(cf.whtlst), whtlst, sizeof(union inet_addr)); + cf.vport = vport; + cf.proto = proto; + + /*del whtlst ip on master lcores*/ + err = dp_vs_whtlst_del_lcore(proto, vaddr, vport, whtlst); + if (err) { + RTE_LOG(INFO, SERVICE, "[%s] fail to del whtlst ip\n", __func__); + return err; + } + + /*del whtlst ip on all slave lcores*/ + msg = msg_make(MSG_TYPE_WHTLST_DEL, 0, DPVS_MSG_MULTICAST, + cid, sizeof(struct dp_vs_whtlst_conf), &cf); + if (!msg) + return EDPVS_NOMEM; + err = multicast_msg_send(msg, 0, NULL); + if (err != EDPVS_OK) { + RTE_LOG(INFO, SERVICE, "[%s] fail to send multicast message\n", __func__); + return err; + } + msg_destroy(&msg); + + return EDPVS_OK; +} + +void dp_vs_whtlst_flush(struct dp_vs_service *svc) +{ + struct whtlst_entry *entry, *next; + int hash; + + for (hash = 0; hash < DPVS_WHTLST_TAB_SIZE; hash++) { + list_for_each_entry_safe(entry, next, 
&this_whtlst_tab[hash], list) { + if (entry->vaddr.in.s_addr == svc->addr.in.s_addr) + dp_vs_whtlst_del(entry->proto, &entry->vaddr, + entry->vport, &entry->whtlst); + } + } + return; +} + +static void dp_vs_whtlst_flush_all(void) +{ + struct whtlst_entry *entry, *next; + int hash; + + for (hash = 0; hash < DPVS_WHTLST_TAB_SIZE; hash++) { + list_for_each_entry_safe(entry, next, &this_whtlst_tab[hash], list) { + dp_vs_whtlst_del(entry->proto, &entry->vaddr, + entry->vport, &entry->whtlst); + } + } + return; +} + +/* + * for control plane + */ +static int whtlst_sockopt_set(sockoptid_t opt, const void *conf, size_t size) +{ + const struct dp_vs_whtlst_conf *whtlst_conf = conf; + int err; + + if (!conf && size < sizeof(*whtlst_conf)) + return EDPVS_INVAL; + + switch (opt) { + case SOCKOPT_SET_WHTLST_ADD: + err = dp_vs_whtlst_add(whtlst_conf->proto, &whtlst_conf->vaddr, + whtlst_conf->vport, &whtlst_conf->whtlst); + break; + case SOCKOPT_SET_WHTLST_DEL: + err = dp_vs_whtlst_del(whtlst_conf->proto, &whtlst_conf->vaddr, + whtlst_conf->vport, &whtlst_conf->whtlst); + break; + default: + err = EDPVS_NOTSUPP; + break; + } + + return err; +} + +static void whtlst_fill_conf(int af, struct dp_vs_whtlst_conf *cf, + const struct whtlst_entry *entry) +{ + memset(cf, 0 ,sizeof(*cf)); + cf->af = af; + cf->vaddr = entry->vaddr; + cf->whtlst = entry->whtlst; + cf->proto = entry->proto; + cf->vport = entry->vport; +} + +static int whtlst_sockopt_get(sockoptid_t opt, const void *conf, size_t size, + void **out, size_t *outsize) +{ + struct dp_vs_whtlst_conf_array *array; + struct whtlst_entry *entry; + size_t naddr, hash; + int off = 0; + + naddr = rte_atomic32_read(&this_num_whtlsts); + *outsize = sizeof(struct dp_vs_whtlst_conf_array) + + naddr * sizeof(struct dp_vs_whtlst_conf); + *out = rte_calloc_socket(NULL, 1, *outsize, 0, rte_socket_id()); + if (!(*out)) + return EDPVS_NOMEM; + array = *out; + array->naddr = naddr; + + for (hash = 0; hash < DPVS_WHTLST_TAB_SIZE; hash++) { + 
list_for_each_entry(entry, &this_whtlst_tab[hash], list) { + if (off >= naddr) + break; + whtlst_fill_conf(AF_INET, &array->whtlsts[off++], entry); + } + } + + return EDPVS_OK; +} + + +static int whtlst_msg_process(bool add, struct dpvs_msg *msg) +{ + struct dp_vs_whtlst_conf *cf; + int err; + assert(msg); + + if (msg->len != sizeof(struct dp_vs_whtlst_conf)){ + RTE_LOG(ERR, SERVICE, "%s: bad message.\n", __func__); + return EDPVS_INVAL; + } + + cf = (struct dp_vs_whtlst_conf *)msg->data; + if (add) + err = dp_vs_whtlst_add_lcore(cf->proto, &cf->vaddr, cf->vport, &cf->whtlst); + else + err = dp_vs_whtlst_del_lcore(cf->proto, &cf->vaddr, cf->vport, &cf->whtlst); + if (err != EDPVS_OK) + RTE_LOG(ERR, SERVICE, "%s: fail to %s whtlst: %s.\n", + __func__, add ? "add" : "del", dpvs_strerror(err)); + + return err; +} + +inline static int whtlst_add_msg_cb(struct dpvs_msg *msg) +{ + return whtlst_msg_process(true, msg); +} + +inline static int whtlst_del_msg_cb(struct dpvs_msg *msg) +{ + return whtlst_msg_process(false, msg); +} + +static struct dpvs_sockopts whtlst_sockopts = { + .version = SOCKOPT_VERSION, + .set_opt_min = SOCKOPT_SET_WHTLST_ADD, + .set_opt_max = SOCKOPT_SET_WHTLST_FLUSH, + .set = whtlst_sockopt_set, + .get_opt_min = SOCKOPT_GET_WHTLST_GETALL, + .get_opt_max = SOCKOPT_GET_WHTLST_GETALL, + .get = whtlst_sockopt_get, +}; + +static int whtlst_lcore_init(void *args) +{ + int i; + if (!rte_lcore_is_enabled(rte_lcore_id())) + return EDPVS_DISABLED; + this_whtlst_tab = rte_malloc_socket(NULL, + sizeof(struct list_head) * DPVS_WHTLST_TAB_SIZE, + RTE_CACHE_LINE_SIZE, rte_socket_id()); + if (!this_whtlst_tab) + return EDPVS_NOMEM; + + for (i = 0; i < DPVS_WHTLST_TAB_SIZE; i++) + INIT_LIST_HEAD(&this_whtlst_tab[i]); + + return EDPVS_OK; +} + +static int whtlst_lcore_term(void *args) +{ + if (!rte_lcore_is_enabled(rte_lcore_id())) + return EDPVS_DISABLED; + + dp_vs_whtlst_flush_all(); + + if (this_whtlst_tab) { + rte_free(this_whtlst_tab); + this_whtlst_tab = NULL; 
+ } + return EDPVS_OK; +} + +int dp_vs_whtlst_init(void) +{ + int err; + lcoreid_t cid; + struct dpvs_msg_type msg_type; + + rte_atomic32_set(&this_num_whtlsts, 0); + + rte_eal_mp_remote_launch(whtlst_lcore_init, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(cid) { + if ((err = rte_eal_wait_lcore(cid)) < 0) { + RTE_LOG(WARNING, SERVICE, "%s: lcore %d: %s.\n", + __func__, cid, dpvs_strerror(err)); + return err; + } + } + + memset(&msg_type, 0, sizeof(struct dpvs_msg_type)); + msg_type.type = MSG_TYPE_WHTLST_ADD; + msg_type.mode = DPVS_MSG_MULTICAST; + msg_type.prio = MSG_PRIO_NORM; + msg_type.cid = rte_lcore_id(); + msg_type.unicast_msg_cb = whtlst_add_msg_cb; + err = msg_type_mc_register(&msg_type); + if (err != EDPVS_OK) { + RTE_LOG(ERR, SERVICE, "%s: fail to register msg.\n", __func__); + return err; + } + + memset(&msg_type, 0, sizeof(struct dpvs_msg_type)); + msg_type.type = MSG_TYPE_WHTLST_DEL; + msg_type.mode = DPVS_MSG_MULTICAST; + msg_type.prio = MSG_PRIO_NORM; + msg_type.cid = rte_lcore_id(); + msg_type.unicast_msg_cb = whtlst_del_msg_cb; + err = msg_type_mc_register(&msg_type); + if (err != EDPVS_OK) { + RTE_LOG(ERR, SERVICE, "%s: fail to register msg.\n", __func__); + return err; + } + + if ((err = sockopt_register(&whtlst_sockopts)) != EDPVS_OK) + return err; + dp_vs_whtlst_rnd = (uint32_t)random(); + + return EDPVS_OK; +} + +int dp_vs_whtlst_term(void) +{ + int err; + lcoreid_t cid; + + if ((err = sockopt_unregister(&whtlst_sockopts)) != EDPVS_OK) + return err; + + rte_eal_mp_remote_launch(whtlst_lcore_term, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(cid) { + if ((err = rte_eal_wait_lcore(cid)) < 0) { + RTE_LOG(WARNING, SERVICE, "%s: lcore %d: %s.\n", + __func__, cid, dpvs_strerror(err)); + } + } + + return EDPVS_OK; +} diff --git a/tools/ipvsadm/ipvsadm.c b/tools/ipvsadm/ipvsadm.c index f4267d4a7..1d3383140 100644 --- a/tools/ipvsadm/ipvsadm.c +++ b/tools/ipvsadm/ipvsadm.c @@ -144,7 +144,10 @@ #define CMD_ADDBLKLST (CMD_NONE+18) #define 
CMD_DELBLKLST (CMD_NONE+19) #define CMD_GETBLKLST (CMD_NONE+20) -#define CMD_MAX CMD_GETBLKLST +#define CMD_ADDWHTLST (CMD_NONE+21) +#define CMD_DELWHTLST (CMD_NONE+22) +#define CMD_GETWHTLST (CMD_NONE+23) +#define CMD_MAX CMD_GETWHTLST #define NUMBER_OF_CMD (CMD_MAX - CMD_NONE) static const char* cmdnames[] = { @@ -168,6 +171,9 @@ static const char* cmdnames[] = { "add-blklst", "del-blklst", "get-blklst", + "add-whtlst", + "del-whtlst", + "get-whtlst", }; static const char* optnames[] = { @@ -196,12 +202,14 @@ static const char* optnames[] = { "pe" , "local-address" , "blklst-address", + "whtlst-address", "synproxy" , "ifname" , "sockpair" , "hash-target", "cpu", "expire-quiescent", + "wlst", }; /* @@ -214,47 +222,53 @@ static const char* optnames[] = { */ static const char commands_v_options[NUMBER_OF_CMD][NUMBER_OF_OPT] = { -/* -n -c svc -s -p -M -r fwd -w -x -y -mc tot dmn -st -rt thr -pc srt sid -ex ops pe laddr blst syn ifname sockpair hashtag cpu expire-quiescent*/ +/* -n -c svc -s -p -M -r fwd -w -x -y -mc tot dmn -st -rt thr -pc srt sid -ex ops pe laddr blst syn ifname sockpair hashtag cpu expire-quiescent wlst*/ /*ADD*/ - {'x', 'x', '+', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', ' ', 'x' ,'x' ,' ', 'x', ' '}, + {'x', 'x', '+', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', ' ', 'x' ,'x' ,' ', 'x', ' ', 'x'}, /*EDIT*/ - {'x', 'x', '+', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', ' ', 'x' ,'x' ,' ', 'x', ' '}, + {'x', 'x', '+', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', ' ', 'x' ,'x' ,' ', 'x', ' ', 'x'}, /*DEL*/ - {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', '+', 'x', 'x', 
'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*FLUSH*/ - {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*LIST*/ - {' ', '1', '1', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '1', '1', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x' ,' ' ,'x', ' ', 'x'}, + {' ', '1', '1', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '1', '1', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x' ,' ' ,'x', ' ', 'x', 'x'}, /*ADDSRV*/ - {'x', 'x', '+', 'x', 'x', 'x', '+', ' ', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', '+', 'x', 'x', 'x', '+', ' ', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*DELSRV*/ - {'x', 'x', '+', 'x', 'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', '+', 'x', 'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*EDITSRV*/ - {'x', 'x', '+', 'x', 'x', 'x', '+', ' ', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', '+', 'x', 'x', 'x', '+', ' ', ' ', ' ', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*TIMEOUT*/ - {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', 'x', 'x', 
'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*STARTD*/ - {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*STOPD*/ - {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*RESTORE*/ - {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*SAVE*/ - {' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*ZERO*/ - {'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*ADDLADDR*/ - {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '+', 'x', 'x', '+' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', '+', 
'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '+', 'x', 'x', '+' ,'x' ,'x', 'x', 'x', 'x'}, /*DELLADDR*/ - {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '+', 'x', 'x', '+' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '+', 'x', 'x', '+' ,'x' ,'x', 'x', 'x', 'x'}, /*GETLADDR*/ - {'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', ' ', 'x'}, + {'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', ' ', 'x', 'x'}, /*ADDBLKLST*/ - {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '+', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '+', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*DELBLKLST*/ - {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '+', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', '+', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, /*GETBLKLST*/ - {'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x'}, + {'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, +/*ADDWHTLST*/ + {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 
'x', '+'}, +/*DELWHTLST*/ + {'x', 'x', '+', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', '+'}, +/*GETWHTLST*/ + {'x', 'x', ' ', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' ,'x' ,'x', 'x', 'x', 'x'}, }; /* printing format flags */ @@ -286,6 +300,7 @@ struct ipvs_command_entry { ipvs_daemon_t daemon; ipvs_laddr_t laddr; ipvs_blklst_t blklst; + ipvs_whtlst_t whtlst; ipvs_sockpair_t sockpair; lcoreid_t cid; }; @@ -354,6 +369,9 @@ static int list_all_laddrs(lcoreid_t cid); static void list_blklsts_print_title(void); static int list_blklst(int af, const union nf_inet_addr *addr, uint16_t port, uint16_t protocol); static int list_all_blklsts(void); +static void list_whtlsts_print_title(void); +static int list_whtlst(int af, const union nf_inet_addr *addr, uint16_t port, uint16_t protocol); +static int list_all_whtlsts(void); static int process_options(int argc, char **argv, int reading_stdin); @@ -419,6 +437,9 @@ parse_options(int argc, char **argv, struct ipvs_command_entry *ce, { "add-blklst", 'U', POPT_ARG_NONE, NULL, 'U', NULL, NULL }, { "del-blklst", 'V', POPT_ARG_NONE, NULL, 'V', NULL, NULL }, { "get-blklst", 'B', POPT_ARG_NONE, NULL, 'B', NULL, NULL }, + { "add-whtlst", 'O', POPT_ARG_NONE, NULL, 'O', NULL, NULL }, + { "del-whtlst", 'Y', POPT_ARG_NONE, NULL, 'Y', NULL, NULL }, + { "get-whtlst", 'W', POPT_ARG_NONE, NULL, 'W', NULL, NULL }, { "tcp-service", 't', POPT_ARG_STRING, &optarg, 't', NULL, NULL }, { "udp-service", 'u', POPT_ARG_STRING, &optarg, 'u', @@ -471,6 +492,7 @@ parse_options(int argc, char **argv, struct ipvs_command_entry *ce, NULL, NULL }, { "laddr", 'z', POPT_ARG_STRING, &optarg, 'z', NULL, NULL }, { "blklst", 'k', POPT_ARG_STRING, &optarg, 'k', NULL, NULL }, + { "whtlst", '2', POPT_ARG_STRING, &optarg, '2', NULL, NULL }, { "synproxy", 'j' , POPT_ARG_STRING, &optarg, 'j', NULL, 
NULL }, { "ifname", 'F', POPT_ARG_STRING, &optarg, 'F', NULL, NULL }, { "match", 'H', POPT_ARG_STRING, &optarg, 'H', NULL, NULL }, @@ -564,6 +586,15 @@ parse_options(int argc, char **argv, struct ipvs_command_entry *ce, case 'B': set_command(&ce->cmd, CMD_GETBLKLST); break; + case 'O': + set_command(&ce->cmd, CMD_ADDWHTLST); + break; + case 'Y': + set_command(&ce->cmd, CMD_DELWHTLST); + break; + case 'W': + set_command(&ce->cmd, CMD_GETWHTLST); + break; default: tryhelp_exit(argv[0], -1); } @@ -781,6 +812,19 @@ parse_options(int argc, char **argv, struct ipvs_command_entry *ce, ce->blklst.__addr_v4 = nsvc.nf_addr.ip; break; + } + case '2': + { + ipvs_service_t nsvc; + set_option(options,OPT_WHTLST_ADDRESS); + parse = parse_service(optarg, &nsvc); + if (!(parse & SERVICE_ADDR)) + fail(2, "illegal whitelist address"); + ce->whtlst.af = nsvc.af; + ce->whtlst.addr = nsvc.nf_addr; + ce->whtlst.__addr_v4 = nsvc.nf_addr.ip; + break; + } case 'F': set_option(options, OPT_IFNAME); @@ -1056,6 +1100,23 @@ static int process_options(int argc, char **argv, int reading_stdin) else result = list_all_blklsts(); break; + + case CMD_ADDWHTLST: + result = ipvs_add_whtlst(&ce.svc , &ce.whtlst); + break; + + case CMD_DELWHTLST: + result = ipvs_del_whtlst(&ce.svc , &ce.whtlst); + break; + + case CMD_GETWHTLST: + if(options & OPT_SERVICE) { + list_whtlsts_print_title(); + result = list_whtlst(ce.svc.af, &ce.svc.nf_addr, ce.svc.user.port, ce.svc.user.protocol); + } + else + result = list_all_whtlsts(); + break; } if (result) fprintf(stderr, "%s\n", ipvs_strerror(errno)); @@ -1459,6 +1520,7 @@ static void usage_exit(const char *program, const int exit_status) " %s -P|Q -t|u|q|f service-address -z local-address\n" " %s -G -t|u|q|f service-address \n" " %s -U|V -t|u|q|f service-address -k blacklist-address\n" + " %s -O|Y -t|u|q|f service-address -2 whitelist-address\n" " %s -a|e -t|u|q|f service-address -r server-address [options]\n" " %s -d -t|u|q|f service-address -r server-address\n" " %s 
-L|l [options]\n" @@ -1468,7 +1530,7 @@ static void usage_exit(const char *program, const int exit_status) " %s --stop-daemon state\n" " %s -h\n\n", program, program, program, - program, program, program, + program, program, program, program, program, program, program, program, program, program, program, program, program, program); @@ -1486,6 +1548,9 @@ static void usage_exit(const char *program, const int exit_status) " --add-blklst -U add blacklist address\n" " --del-blklst -V del blacklist address\n" " --get-blklst -B get blacklist address\n" + " --add-whtlst -O add whitelist address\n" + " --del-whtlst -Y del whitelist address\n" + " --get-whtlst -W get whitelist address\n" " --save -S save rules to stdout\n" " --add-server -a add real server with options\n" " --edit-server -e edit real server with options\n" @@ -2204,6 +2269,81 @@ static int list_all_blklsts(void) return 0; } +static void list_whtlsts_print_title(void) +{ + printf("%-20s %-8s %-20s\n" , + "VIP:VPORT" , + "PROTO" , + "WHITELIST"); +} + +static void print_service_and_whtlsts(struct dp_vs_whtlst_conf *whtlst) +{ + char vip[64], bip[64], port[8], proto[8]; + const char *pattern = (whtlst->af == AF_INET ? 
+ "%s:%-8s %-8s %-20s\n" : "[%s]:%-8s %-8s %-20s\n"); + + switch (whtlst->proto) { + case IPPROTO_TCP: + snprintf(proto, sizeof(proto), "%s", "TCP"); + break; + case IPPROTO_UDP: + snprintf(proto, sizeof(proto), "%s", "UDP"); + break; + case IPPROTO_ICMP: + snprintf(proto, sizeof(proto), "%s", "ICMP"); + break; + case IPPROTO_ICMPV6: + snprintf(proto, sizeof(proto), "%s", "IMCPv6"); + break; + default: + break; + } + + snprintf(port, sizeof(port), "%u", ntohs(whtlst->vport)); + + printf(pattern, inet_ntop(whtlst->af, (const void *)&whtlst->vaddr, vip, sizeof(vip)), + port, proto, inet_ntop(whtlst->af, (const void *)&whtlst->whtlst, bip, sizeof(bip))); +} +static int list_whtlst(int af, const union nf_inet_addr *addr, uint16_t port, uint16_t protocol) +{ + int i; + struct dp_vs_whtlst_conf_array *get; + + if (!(get = ipvs_get_whtlsts())) { + fprintf(stderr, "%s\n", ipvs_strerror(errno)); + return -1; + } + + for (i = 0; i < get->naddr; i++) { + if (inet_addr_equal(af, addr,(const union nf_inet_addr *) &get->whtlsts[i].vaddr) && + port == get->whtlsts[i].vport && protocol == get->whtlsts[i].proto) { + print_service_and_whtlsts(&get->whtlsts[i]); + } + } + free(get); + + return 0; +} + +static int list_all_whtlsts(void) +{ + struct ip_vs_get_services_app *get; + int i; + + if (!(get = ipvs_get_services(0))) { + fprintf(stderr, "%s\n", ipvs_strerror(errno)); + exit(1); + } + + list_whtlsts_print_title(); + for (i = 0; i < get->user.num_services; i++) + list_whtlst(get->user.entrytable[i].af, &get->user.entrytable[i].nf_addr, + get->user.entrytable[i].user.port, get->user.entrytable[i].user.protocol); + free(get); + return 0; +} + static void list_service(ipvs_service_t *svc, unsigned int format, lcoreid_t cid) { ipvs_service_entry_t *entry; diff --git a/tools/keepalived/keepalived/check/check_data.c b/tools/keepalived/keepalived/check/check_data.c index b0d77ea83..fe2d3ee6e 100644 --- a/tools/keepalived/keepalived/check/check_data.c +++ 
b/tools/keepalived/keepalived/check/check_data.c @@ -98,6 +98,78 @@ dump_ssl(FILE *fp) conf_write(fp, " Key file : %s", ssl->keyfile); } +/* whitelist IP address group facility functions */ +static void +free_whtlst_group(void *data) +{ + whtlst_addr_group *whtlst_group = data; + FREE_PTR(whtlst_group->gname); + free_list(&whtlst_group->addr_ip); + free_list(&whtlst_group->range); + FREE(whtlst_group); +} +static void +dump_whtlst_group(FILE *fp, const void *data) +{ + const whtlst_addr_group *whtlst_group = data; + + conf_write(fp, " whitelist IP address group = %s", whtlst_group->gname); + dump_list(fp, whtlst_group->addr_ip); + dump_list(fp, whtlst_group->range); +} +static void +free_whtlst_entry(void *data) +{ + FREE(data); +} +static void +dump_whtlst_entry(FILE *fp, const void *data) +{ + const whtlst_addr_entry *whtlst_entry = data; + + if (whtlst_entry->range) + conf_write(fp, " IP Range = %s-%d" + , inet_sockaddrtos(&whtlst_entry->addr) + , whtlst_entry->range); + else + conf_write(fp, " IP = %s" + , inet_sockaddrtos(&whtlst_entry->addr)); +} +void +alloc_whtlst_group(char *gname) +{ + int size = strlen(gname); + whtlst_addr_group *new; + + new = (whtlst_addr_group *) MALLOC(sizeof (whtlst_addr_group)); + new->gname = (char *) MALLOC(size + 1); + memcpy(new->gname, gname, size); + new->addr_ip = alloc_list(free_whtlst_entry, dump_whtlst_entry); + new->range = alloc_list(free_whtlst_entry, dump_whtlst_entry); + + list_add(check_data->whtlst_group, new); +} +void +alloc_whtlst_entry(const vector_t *strvec) +{ + whtlst_addr_group *whtlst_group = LIST_TAIL_DATA(check_data->whtlst_group); + whtlst_addr_entry *new; + + new = (whtlst_addr_entry *) MALLOC(sizeof (whtlst_addr_entry)); + + inet_stor(vector_slot(strvec, 0), &new->range); + if (new->range == UINT32_MAX) + new->range = 0; + inet_stosockaddr(vector_slot(strvec, 0), NULL, &new->addr); + + if (!new->range) + list_add(whtlst_group->addr_ip, new); + else if ( (0 < new->range) && (new->range < 255) ) + 
list_add(whtlst_group->range, new); + else + log_message(LOG_INFO, "invalid: whitelist IP address range %d", new->range); +} + /* Virtual server group facility functions */ static void free_vsg(void *data) @@ -271,6 +343,7 @@ free_vs(void *data) free_notify_script(&vs->notify_quorum_down); FREE_PTR(vs->local_addr_gname); FREE_PTR(vs->blklst_addr_gname); + FREE_PTR(vs->whtlst_addr_gname); FREE_PTR(vs->vip_bind_dev); FREE(vs); } @@ -531,6 +604,7 @@ alloc_vs(const char *param1, const char *param2) new->expire_quiescent_conn = false; new->local_addr_gname = NULL; new->blklst_addr_gname = NULL; + new->whtlst_addr_gname = NULL; new->vip_bind_dev = NULL; new->hash_target = 0; new->bps = 0; @@ -974,6 +1048,7 @@ alloc_check_data(void) #endif new->laddr_group = alloc_list(free_laddr_group, dump_laddr_group); new->blklst_group = alloc_list(free_blklst_group, dump_blklst_group); + new->whtlst_group = alloc_list(free_whtlst_group, dump_whtlst_group); new->tunnel_group = alloc_list(free_tunnel_group, dump_tunnel_group); return new; diff --git a/tools/keepalived/keepalived/check/check_parser.c b/tools/keepalived/keepalived/check/check_parser.c index ca50b5dba..85d5c4c54 100644 --- a/tools/keepalived/keepalived/check/check_parser.c +++ b/tools/keepalived/keepalived/check/check_parser.c @@ -978,6 +978,20 @@ blklst_gname_handler(const vector_t *strvec) virtual_server_t *vs = LIST_TAIL_DATA(check_data->vs); vs->blklst_addr_gname = set_value(strvec); } +static void +whtlst_group_handler(const vector_t *strvec) +{ + if (!strvec) + return; + alloc_whtlst_group(vector_slot(strvec, 1)); + alloc_value_block(alloc_whtlst_entry, strvec_slot(strvec, 0)); +} +static void +whtlst_gname_handler(const vector_t *strvec) +{ + virtual_server_t *vs = LIST_TAIL_DATA(check_data->vs); + vs->whtlst_addr_gname = set_value(strvec); +} static void tunnel_handler(const vector_t *strvec) @@ -1124,6 +1138,8 @@ init_check_keywords(bool active) install_keyword_root("local_address_group", &laddr_group_handler, 
active); /* blacklist IP */ install_keyword_root("deny_address_group", &blklst_group_handler, active); + /* whitelist IP */ + install_keyword_root("allow_address_group", &whtlst_group_handler, active); /* Virtual server mapping */ install_keyword_root("virtual_server_group", &vsg_handler, active); @@ -1210,6 +1226,7 @@ init_check_keywords(bool active) install_sublevel_end(); install_keyword("laddr_group_name", &laddr_gname_handler); install_keyword("daddr_group_name", &blklst_gname_handler); + install_keyword("aaddr_group_name", &whtlst_gname_handler); install_keyword("syn_proxy", &syn_proxy_handler); install_keyword("expire_quiescent_conn", &expire_quiescent_handler); install_keyword("vip_bind_dev", &bind_dev_handler); diff --git a/tools/keepalived/keepalived/check/ipvswrapper.c b/tools/keepalived/keepalived/check/ipvswrapper.c old mode 100644 new mode 100755 index 546fadedd..fa601ea28 --- a/tools/keepalived/keepalived/check/ipvswrapper.c +++ b/tools/keepalived/keepalived/check/ipvswrapper.c @@ -87,6 +87,19 @@ ipvs_get_blklst_group_by_name(char *gname, list l) return NULL; } +whtlst_addr_group * __attribute__ ((pure)) +ipvs_get_whtlst_group_by_name(char *gname, list l) +{ + element e; + whtlst_addr_group *whtlst_group; + + LIST_FOREACH (l, whtlst_group, e) { + if (!strcmp(whtlst_group->gname, gname)) + return whtlst_group; + } + return NULL; +} + /* Initialization helpers */ int ipvs_start(void) @@ -136,6 +149,7 @@ ipvs_talk(int cmd, ipvs_daemon_t *daemonrule, ipvs_laddr_t *laddr_rule, ipvs_blklst_t *blklst_rule, + ipvs_whtlst_t *whtlst_rule, ipvs_tunnel_t *tunnel_rule, bool ignore_error) { @@ -191,6 +205,12 @@ ipvs_talk(int cmd, case IP_VS_SO_SET_DELBLKLST: result = ipvs_del_blklst(srule, blklst_rule); break; + case IP_VS_SO_SET_ADDWHTLST: + result = ipvs_add_whtlst(srule, whtlst_rule); + break; + case IP_VS_SO_SET_DELWHTLST: + result = ipvs_del_whtlst(srule, whtlst_rule); + break; case IP_VS_SO_SET_ADDTUNNEL: result = ipvs_add_tunnel(tunnel_rule); break; @@ 
-259,14 +279,14 @@ ipvs_syncd_cmd(int cmd, const struct lvs_syncd_config *config, int state, bool i } /* Talk to the IPVS channel */ - ipvs_talk(cmd, NULL, NULL, &daemonrule, NULL, NULL, NULL, ignore_error); + ipvs_talk(cmd, NULL, NULL, &daemonrule, NULL, NULL, NULL, NULL, ignore_error); } #endif void ipvs_flush_cmd(void) { - ipvs_talk(IP_VS_SO_SET_FLUSH, NULL, NULL, NULL, NULL, NULL, NULL, false); + ipvs_talk(IP_VS_SO_SET_FLUSH, NULL, NULL, NULL, NULL, NULL, NULL, NULL, false); } /* IPVS group range rule */ @@ -284,7 +304,7 @@ ipvs_group_range_cmd(int cmd, ipvs_service_t *srule, ipvs_dest_t *drule, virtual /* Process the whole range */ for (i = 0; i <= vsg_entry->range; i++) { /* Talk to the IPVS channel */ - if (ipvs_talk(cmd, srule, drule, NULL, NULL, NULL, NULL, false)) + if (ipvs_talk(cmd, srule, drule, NULL, NULL, NULL, NULL, NULL, false)) return -1; if (srule->af == AF_INET) @@ -410,7 +430,7 @@ ipvs_group_cmd(int cmd, ipvs_service_t *srule, ipvs_dest_t *drule, virtual_serve srule->nf_addr.ip = inet_sockaddrip4(&vsg_entry->addr); /* Talk to the IPVS channel */ - if (ipvs_talk(cmd, srule, drule, NULL, NULL, NULL, NULL, false)) + if (ipvs_talk(cmd, srule, drule, NULL, NULL, NULL, NULL, NULL, false)) return -1; } } @@ -429,7 +449,7 @@ ipvs_group_cmd(int cmd, ipvs_service_t *srule, ipvs_dest_t *drule, virtual_serve /* Talk to the IPVS channel */ if (ipvs_change_needed(cmd, vsg_entry, vs, rs)) { - if (ipvs_talk(cmd, srule, drule, NULL, NULL, NULL, NULL, false)) + if (ipvs_talk(cmd, srule, drule, NULL, NULL, NULL, NULL, NULL, false)) return -1; } ipvs_set_vsge_alive_state(cmd, vsg_entry, vs); @@ -461,7 +481,7 @@ ipvs_laddr_range_cmd(int cmd, local_addr_entry *laddr_entry, virtual_server_t *v laddr_rule.addr.ip = addr_ip; strncpy(laddr_rule.ifname, laddr_entry->ifname, sizeof(laddr_rule.ifname)); - ipvs_talk(cmd, srule, NULL, NULL, &laddr_rule, NULL, NULL, false); + ipvs_talk(cmd, srule, NULL, NULL, &laddr_rule, NULL, NULL, NULL, false); } } @@ -486,7 +506,7 @@ 
ipvs_laddr_group_cmd(int cmd, local_addr_group *laddr_group, virtual_server_t *v laddr_rule.addr.ip = inet_sockaddrip4(&laddr_entry->addr); strncpy(laddr_rule.ifname, laddr_entry->ifname, sizeof(laddr_rule.ifname)); - ipvs_talk(cmd, srule, NULL, NULL, &laddr_rule, NULL, NULL, false); + ipvs_talk(cmd, srule, NULL, NULL, &laddr_rule, NULL, NULL, NULL, false); } l = laddr_group->range; @@ -600,7 +620,7 @@ ipvs_blklst_range_cmd(int cmd, blklst_addr_entry *blklst_entry, ipvs_service_t * else blklst_rule.addr.ip = addr_ip; - ipvs_talk(cmd, srule, NULL, NULL, NULL, &blklst_rule, NULL, false); + ipvs_talk(cmd, srule, NULL, NULL, NULL, &blklst_rule, NULL, NULL, false); } } @@ -623,7 +643,7 @@ ipvs_blklst_group_cmd(int cmd, blklst_addr_group *blklst_group, ipvs_service_t * inet_sockaddrip6(&blklst_entry->addr, &blklst_rule.addr.in6); else blklst_rule.addr.ip = inet_sockaddrip4(&blklst_entry->addr); - ipvs_talk(cmd, srule, NULL, NULL, NULL, &blklst_rule, NULL, false); + ipvs_talk(cmd, srule, NULL, NULL, NULL, &blklst_rule, NULL, NULL, false); } l = blklst_group->range; @@ -808,6 +828,151 @@ ipvs_set_drule(int cmd, ipvs_dest_t *drule, real_server_t * rs) #endif } +/*check whitelist addr*/ + +static void +ipvs_whtlst_range_cmd(int cmd, whtlst_addr_entry *whtlst_entry, ipvs_service_t *srule) +{ + uint32_t addr_ip, ip; + ipvs_whtlst_t whtlst_rule; + + memset(&whtlst_rule, 0, sizeof(ipvs_whtlst_t)); + whtlst_rule.af = whtlst_entry->addr.ss_family; + if (whtlst_entry->addr.ss_family == AF_INET6) { + inet_sockaddrip6(&whtlst_entry->addr, &whtlst_rule.addr.in6); + ip = whtlst_rule.addr.in6.s6_addr32[3]; + } else { + ip = inet_sockaddrip4(&whtlst_entry->addr); + } + + for (addr_ip = ip; ((addr_ip >> 24) & 0xFF) <= whtlst_entry->range; + addr_ip += 0x01000000) { + if (whtlst_entry->addr.ss_family == AF_INET6) + whtlst_rule.addr.in6.s6_addr32[3] = addr_ip; + else + whtlst_rule.addr.ip = addr_ip; + + ipvs_talk(cmd, srule, NULL, NULL, NULL, NULL, &whtlst_rule, NULL, false); + } +} + 
+static void +ipvs_whtlst_group_cmd(int cmd, whtlst_addr_group *whtlst_group, ipvs_service_t *srule) +{ + whtlst_addr_entry *whtlst_entry; + ipvs_whtlst_t whtlst_rule; + list l; + element e; + + if (!whtlst_group) + return; + + l = whtlst_group->addr_ip; + LIST_FOREACH(l, whtlst_entry, e) { + memset(&whtlst_rule, 0, sizeof(ipvs_whtlst_t)); + whtlst_rule.af = whtlst_entry->addr.ss_family; + if (whtlst_entry->addr.ss_family == AF_INET6) + inet_sockaddrip6(&whtlst_entry->addr, &whtlst_rule.addr.in6); + else + whtlst_rule.addr.ip = inet_sockaddrip4(&whtlst_entry->addr); + ipvs_talk(cmd, srule, NULL, NULL, NULL, NULL, &whtlst_rule, NULL, false); + } + + l = whtlst_group->range; + LIST_FOREACH(l, whtlst_entry, e) { + ipvs_whtlst_range_cmd(cmd, whtlst_entry, srule); + } +} + +static void +ipvs_whtlst_vsg_cmd(int cmd, + list vs_group, + virtual_server_t *vs, + whtlst_addr_group *whtlst_group, + ipvs_service_t *srule) +{ + virtual_server_group_t *vsg = ipvs_get_group_by_name(vs->vsgname, vs_group); + virtual_server_group_entry_t *vsg_entry; + list l; + element e; + if (!vsg) + return; + + /* visit range list */ + l = vsg->addr_range; + LIST_FOREACH(l, vsg_entry, e) { + uint32_t addr_ip, ip; + + srule->af = vsg_entry->addr.ss_family; + if (srule->af == AF_INET6) { + inet_sockaddrip6(&vsg_entry->addr, &srule->nf_addr.in6); + ip = srule->nf_addr.in6.s6_addr32[3]; + } else { + ip = inet_sockaddrip4(&vsg_entry->addr); + } + + if (!vsg_entry->range) { + if (srule->af == AF_INET6) { + if (srule->user.netmask == 0xffffffff) + srule->user.netmask = 128; + srule->nf_addr.in6.s6_addr32[3] = ip; + } else { + srule->nf_addr.ip = ip; + } + srule->user.port = inet_sockaddrport(&vsg_entry->addr); + + ipvs_whtlst_group_cmd(cmd, whtlst_group, srule); + continue; + } + + /* Parse the whole range */ + for (addr_ip = ip; + ((addr_ip >> 24) & 0xFF) <= vsg_entry->range; + addr_ip += 0x01000000) { + if (srule->af == AF_INET6) { + if (srule->user.netmask == 0xffffffff) + srule->user.netmask = 128; 
+ srule->nf_addr.in6.s6_addr32[3] = addr_ip; + } else { + srule->nf_addr.ip = addr_ip; + } + srule->user.port = inet_sockaddrport(&vsg_entry->addr); + + ipvs_whtlst_group_cmd(cmd, whtlst_group, srule); + } + } +} + +static int +ipvs_whtlst_cmd(int cmd, ipvs_service_t *srule, virtual_server_t * vs) +{ + whtlst_addr_group *whtlst_group = ipvs_get_whtlst_group_by_name(vs->whtlst_addr_gname, + check_data->whtlst_group); + if (!whtlst_group) { + log_message(LOG_ERR, "No address in group %s", vs->whtlst_addr_gname); + return -1; + } + + memset(srule, 0, sizeof(ipvs_service_t)); + srule->user.netmask = (vs->addr.ss_family == AF_INET6) ? 128 : ((u_int32_t) 0xffffffff); + srule->user.protocol = vs->service_type; + + if(vs->vsgname) { + ipvs_whtlst_vsg_cmd(cmd, check_data->vs_group, vs, whtlst_group, srule); + } else { + if (!vs->vfwmark) { + srule->af = vs->addr.ss_family; + if (vs->addr.ss_family == AF_INET6) + inet_sockaddrip6(&vs->addr, &srule->nf_addr.in6); + else + srule->nf_addr.ip = inet_sockaddrip4(&vs->addr); + srule->user.port = inet_sockaddrport(&vs->addr); + ipvs_whtlst_group_cmd(cmd, whtlst_group, srule); + } + } + return IPVS_SUCCESS; +} + int ipvs_tunnel_cmd(int cmd, tunnel_entry *entry) { ipvs_tunnel_t tunnel_rule; @@ -818,7 +983,7 @@ int ipvs_tunnel_cmd(int cmd, tunnel_entry *entry) tunnel_rule.laddr.ip = inet_sockaddrip4(&entry->local); tunnel_rule.raddr.ip = inet_sockaddrip4(&entry->remote); - ipvs_talk(cmd, NULL, NULL, NULL, NULL, NULL, &tunnel_rule, false); + ipvs_talk(cmd, NULL, NULL, NULL, NULL, NULL, NULL, &tunnel_rule, false); return IPVS_SUCCESS; } @@ -839,6 +1004,10 @@ ipvs_cmd(int cmd, virtual_server_t *vs, real_server_t *rs) /* Set/Remove deny address */ if (cmd == IP_VS_SO_SET_ADDBLKLST || cmd == IP_VS_SO_SET_DELBLKLST) return ipvs_blklst_cmd(cmd, &srule, vs); + /* Set/Remove allow address */ + if (cmd == IP_VS_SO_SET_ADDWHTLST || cmd == IP_VS_SO_SET_DELWHTLST) + return ipvs_whtlst_cmd(cmd, &srule, vs); + if (rs) { ipvs_set_drule(cmd, &drule, 
rs); @@ -886,7 +1055,7 @@ ipvs_cmd(int cmd, virtual_server_t *vs, real_server_t *rs) } /* Talk to the IPVS channel */ - return ipvs_talk(cmd, &srule, &drule, NULL, NULL, NULL, NULL, false); + return ipvs_talk(cmd, &srule, &drule, NULL, NULL, NULL, NULL, NULL, false); } /* at reload, add alive destinations to the newly created vsge */ @@ -914,7 +1083,7 @@ ipvs_group_sync_entry(virtual_server_t *vs, virtual_server_group_entry_t *vsge) /* Set vs rule */ if (vsge->is_fwmark) { /* Talk to the IPVS channel */ - ipvs_talk(IP_VS_SO_SET_ADDDEST, &srule, &drule, NULL, NULL, NULL, NULL, false); + ipvs_talk(IP_VS_SO_SET_ADDDEST, &srule, &drule, NULL, NULL, NULL, NULL, NULL, false); } else ipvs_group_range_cmd(IP_VS_SO_SET_ADDDEST, &srule, &drule, vsge); @@ -976,6 +1145,7 @@ ipvs_rm_lentry_from_vsg(local_addr_entry *laddr_entry, virtual_server_t *vs) NULL/*daemonrule*/, &laddr_rule, NULL/*blklst_rule*/, + NULL/*whtlst_rule*/, NULL, false); } @@ -1007,6 +1177,7 @@ ipvs_rm_lentry_from_vsg(local_addr_entry *laddr_entry, virtual_server_t *vs) NULL/*daemonrule*/, &laddr_rule, NULL/*blklst_rule*/, + NULL/*whtlst_rule*/, NULL, false); } @@ -1047,7 +1218,7 @@ ipvs_laddr_remove_entry(virtual_server_t *vs, local_addr_entry *laddr_entry) laddr_rule.addr.ip = inet_sockaddrip4(&laddr_entry->addr); strncpy(laddr_rule.ifname, laddr_entry->ifname, sizeof(laddr_rule.ifname)); - ipvs_talk(IP_VS_SO_SET_DELLADDR, &srule, NULL, NULL, &laddr_rule, NULL, NULL, false); + ipvs_talk(IP_VS_SO_SET_DELLADDR, &srule, NULL, NULL, &laddr_rule, NULL, NULL, NULL, false); } } @@ -1055,13 +1226,14 @@ ipvs_laddr_remove_entry(virtual_server_t *vs, local_addr_entry *laddr_entry) } static void -ipvs_rm_bentry_from_vsg(blklst_addr_entry *blklst_entry, const char *vsgname, ipvs_service_t *srule) +ipvs_rm_bentry_from_vsg(blklst_addr_entry *blklst_entry, whtlst_addr_entry *whtlst_entry, const char *vsgname, ipvs_service_t *srule) { list l; element e; virtual_server_group_t *vsg; virtual_server_group_entry_t *vsg_entry; 
ipvs_blklst_t blklst_rule; + ipvs_whtlst_t whtlst_rule; vsg = ipvs_get_group_by_name(vsgname, check_data->vs_group); if (!vsg) return; @@ -1086,16 +1258,31 @@ ipvs_rm_bentry_from_vsg(blklst_addr_entry *blklst_entry, const char *vsgname, ip else srule->nf_addr.ip = ip; - if (blklst_entry->range) - ipvs_blklst_range_cmd(IP_VS_SO_SET_DELBLKLST, blklst_entry, srule); - else { - memset(&blklst_rule, 0, sizeof(ipvs_blklst_t)); - blklst_rule.af = blklst_entry->addr.ss_family; - if (blklst_entry->addr.ss_family == AF_INET6) - inet_sockaddrip6(&blklst_entry->addr, &blklst_rule.addr.in6); - else - blklst_rule.addr.ip = inet_sockaddrip4(&blklst_entry->addr); - ipvs_talk(IP_VS_SO_SET_DELBLKLST, srule, NULL, NULL, NULL, &blklst_rule, NULL, false); + if (blklst_entry != NULL) { + if (blklst_entry->range) + ipvs_blklst_range_cmd(IP_VS_SO_SET_DELBLKLST, blklst_entry, srule); + else { + memset(&blklst_rule, 0, sizeof(ipvs_blklst_t)); + blklst_rule.af = blklst_entry->addr.ss_family; + if (blklst_entry->addr.ss_family == AF_INET6) + inet_sockaddrip6(&blklst_entry->addr, &blklst_rule.addr.in6); + else + blklst_rule.addr.ip = inet_sockaddrip4(&blklst_entry->addr); + ipvs_talk(IP_VS_SO_SET_DELBLKLST, srule, NULL, NULL, NULL, &blklst_rule, NULL, NULL, false); + } + } + if (whtlst_entry != NULL) { + if (whtlst_entry->range) + ipvs_whtlst_range_cmd(IP_VS_SO_SET_DELWHTLST, whtlst_entry, srule); + else { + memset(&whtlst_rule, 0, sizeof(ipvs_whtlst_t)); + whtlst_rule.af = whtlst_entry->addr.ss_family; + if (whtlst_entry->addr.ss_family == AF_INET6) + inet_sockaddrip6(&whtlst_entry->addr, &whtlst_rule.addr.in6); + else + whtlst_rule.addr.ip = inet_sockaddrip4(&whtlst_entry->addr); + ipvs_talk(IP_VS_SO_SET_DELWHTLST, srule, NULL, NULL, NULL, NULL, &whtlst_rule, NULL, false); + } } continue; } @@ -1107,18 +1294,35 @@ ipvs_rm_bentry_from_vsg(blklst_addr_entry *blklst_entry, const char *vsgname, ip srule->nf_addr.in6.s6_addr32[3] = addr_ip; else srule->nf_addr.ip = addr_ip; - - if 
(blklst_entry->range) - ipvs_blklst_range_cmd(IP_VS_SO_SET_DELBLKLST, blklst_entry, srule); - else { - memset(&blklst_rule, 0, sizeof(ipvs_blklst_t)); - blklst_rule.af = blklst_entry->addr.ss_family; - if (blklst_entry->addr.ss_family == AF_INET6) - inet_sockaddrip6(&blklst_entry->addr, &blklst_rule.addr.in6); - else - blklst_rule.addr.ip = inet_sockaddrip4(&blklst_entry->addr); - - ipvs_talk(IP_VS_SO_SET_DELBLKLST, srule, NULL, NULL, NULL, &blklst_rule, NULL, false); + if (blklst_entry != NULL) + { + if (blklst_entry->range) + ipvs_blklst_range_cmd(IP_VS_SO_SET_DELBLKLST, blklst_entry, srule); + else { + memset(&blklst_rule, 0, sizeof(ipvs_blklst_t)); + blklst_rule.af = blklst_entry->addr.ss_family; + if (blklst_entry->addr.ss_family == AF_INET6) + inet_sockaddrip6(&blklst_entry->addr, &blklst_rule.addr.in6); + else + blklst_rule.addr.ip = inet_sockaddrip4(&blklst_entry->addr); + + ipvs_talk(IP_VS_SO_SET_DELBLKLST, srule, NULL, NULL, NULL, &blklst_rule, NULL, NULL, false); + } + } + if (whtlst_entry != NULL) + { + if (whtlst_entry->range) + ipvs_whtlst_range_cmd(IP_VS_SO_SET_DELWHTLST, whtlst_entry, srule); + else { + memset(&whtlst_rule, 0, sizeof(ipvs_whtlst_t)); + whtlst_rule.af = whtlst_entry->addr.ss_family; + if (whtlst_entry->addr.ss_family == AF_INET6) + inet_sockaddrip6(&whtlst_entry->addr, &whtlst_rule.addr.in6); + else + whtlst_rule.addr.ip = inet_sockaddrip4(&whtlst_entry->addr); + + ipvs_talk(IP_VS_SO_SET_DELWHTLST, srule, NULL, NULL, NULL, NULL, &whtlst_rule, NULL, false); + } } } } @@ -1134,7 +1338,7 @@ ipvs_blklst_remove_entry(virtual_server_t *vs, blklst_addr_entry *blklst_entry) srule.user.protocol = vs->service_type; if (vs->vsgname) { - ipvs_rm_bentry_from_vsg(blklst_entry, vs->vsgname, &srule); + ipvs_rm_bentry_from_vsg(blklst_entry, NULL, vs->vsgname, &srule); } else if (!vs->vfwmark) { srule.af = vs->addr.ss_family; if (vs->addr.ss_family == AF_INET6) { @@ -1156,13 +1360,53 @@ ipvs_blklst_remove_entry(virtual_server_t *vs, blklst_addr_entry 
*blklst_entry) else blklst_rule.addr.ip = inet_sockaddrip4(&blklst_entry->addr); - ipvs_talk(IP_VS_SO_SET_DELBLKLST, &srule, NULL, NULL, NULL, &blklst_rule, NULL, false); + ipvs_talk(IP_VS_SO_SET_DELBLKLST, &srule, NULL, NULL, NULL, &blklst_rule, NULL, NULL, false); } } return IPVS_SUCCESS; } +int +ipvs_whtlst_remove_entry(virtual_server_t *vs, whtlst_addr_entry *whtlst_entry) +{ + ipvs_service_t srule; + ipvs_whtlst_t whtlst_rule; + + memset(&srule, 0, sizeof(ipvs_service_t)); + srule.user.protocol = vs->service_type; + + if (vs->vsgname) { + ipvs_rm_bentry_from_vsg(NULL, whtlst_entry, vs->vsgname, &srule); + } else if (!vs->vfwmark) { + srule.af = vs->addr.ss_family; + if (vs->addr.ss_family == AF_INET6) { + srule.user.netmask = 128; + inet_sockaddrip6(&vs->addr, &srule.nf_addr.in6); + } else { + srule.user.netmask = 0xffffffff; + srule.nf_addr.ip = inet_sockaddrip4(&vs->addr); + } + srule.user.port = inet_sockaddrport(&vs->addr); + + if (whtlst_entry->range) { + ipvs_whtlst_range_cmd(IP_VS_SO_SET_DELWHTLST, whtlst_entry, &srule); + } else { + memset(&whtlst_rule, 0, sizeof(ipvs_whtlst_t)); + whtlst_rule.af = whtlst_entry->addr.ss_family; + if (whtlst_entry->addr.ss_family == AF_INET6) + inet_sockaddrip6(&whtlst_entry->addr, &whtlst_rule.addr.in6); + else + whtlst_rule.addr.ip = inet_sockaddrip4(&whtlst_entry->addr); + + ipvs_talk(IP_VS_SO_SET_DELWHTLST, &srule, NULL, NULL, NULL, NULL, &whtlst_rule, NULL, false); + } + } + + return IPVS_SUCCESS; +} + + /* Remove a specific vs group entry */ void ipvs_group_remove_entry(virtual_server_t *vs, virtual_server_group_entry_t *vsge) @@ -1188,7 +1432,7 @@ ipvs_group_remove_entry(virtual_server_t *vs, virtual_server_group_entry_t *vsge /* Set vs rule */ if (vsge->is_fwmark) { /* Talk to the IPVS channel */ - ipvs_talk(IP_VS_SO_SET_DELDEST, &srule, &drule, NULL, NULL, NULL, NULL, false); + ipvs_talk(IP_VS_SO_SET_DELDEST, &srule, &drule, NULL, NULL, NULL, NULL, NULL, false); } else ipvs_group_range_cmd(IP_VS_SO_SET_DELDEST, 
&srule, &drule, vsge); @@ -1209,7 +1453,7 @@ ipvs_group_remove_entry(virtual_server_t *vs, virtual_server_group_entry_t *vsge srule.user.port = inet_sockaddrport(&vsge->addr); srule.user.fwmark = vsge->vfwmark; - ipvs_talk(IP_VS_SO_SET_DEL, &srule, NULL, NULL, NULL, NULL, NULL, false); + ipvs_talk(IP_VS_SO_SET_DEL, &srule, NULL, NULL, NULL, NULL, NULL, NULL, false); } } } diff --git a/tools/keepalived/keepalived/check/ipwrapper.c b/tools/keepalived/keepalived/check/ipwrapper.c old mode 100644 new mode 100755 index 897bd1251..ebaa03351 --- a/tools/keepalived/keepalived/check/ipwrapper.c +++ b/tools/keepalived/keepalived/check/ipwrapper.c @@ -599,7 +599,10 @@ init_service_vs(virtual_server_t * vs) if (!ipvs_cmd(LVS_CMD_ADD_BLKLST, vs, NULL)) return 0; } - + if (vs->whtlst_addr_gname) { + if (!ipvs_cmd(LVS_CMD_ADD_WHTLST, vs, NULL)) + return 0; + } /* Processing real server queue */ if (!init_service_rs(vs)) return false; @@ -1140,6 +1143,73 @@ clear_diff_blklst(virtual_server_t * old_vs) return 1; } +/* Check if a whitelist address entry is in list */ +static int +whtlst_entry_exist(whtlst_addr_entry *whtlst_entry, list l) +{ + element e; + whtlst_addr_entry *entry; + + for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) { + entry = ELEMENT_DATA(e); + if (sockstorage_equal(&entry->addr, &whtlst_entry->addr) && + entry->range == whtlst_entry->range) + return 1; + } + return 0; +} + +/* Clear the diff whtlst address entry of the old vs */ +static int +clear_diff_whtlst_entry(list old, list new, virtual_server_t * old_vs) +{ + element e; + whtlst_addr_entry *whtlst_entry; + + for (e = LIST_HEAD(old); e; ELEMENT_NEXT(e)) { + whtlst_entry = ELEMENT_DATA(e); + if (!whtlst_entry_exist(whtlst_entry, new)) { + log_message(LOG_INFO, "VS [%s-%d] in whitelist address group %s no longer exist\n" + , inet_sockaddrtos(&whtlst_entry->addr) + , whtlst_entry->range + , old_vs->whtlst_addr_gname); + + if (!ipvs_whtlst_remove_entry(old_vs, whtlst_entry)) + return 0; + } + } + + return 1; +} + 
+/* Clear the diff whitelist address of the old vs */ +static int +clear_diff_whtlst(virtual_server_t * old_vs) +{ + whtlst_addr_group *old; + whtlst_addr_group *new; + + /* + * If old vs didn't own whitelist address group, + * then do nothing and return + */ + if (!old_vs->whtlst_addr_gname) + return 1; + + /* Fetch whitelist address group */ + old = ipvs_get_whtlst_group_by_name(old_vs->whtlst_addr_gname, + old_check_data->whtlst_group); + new = ipvs_get_whtlst_group_by_name(old_vs->whtlst_addr_gname, + check_data->whtlst_group); + + if (!clear_diff_whtlst_entry(old->addr_ip, new->addr_ip, old_vs)) + return 0; + if (!clear_diff_whtlst_entry(old->range, new->range, old_vs)) + return 0; + + return 1; +} + /* When reloading configuration, remove negative diff entries */ void clear_diff_services(list old_checkers_queue) @@ -1196,6 +1266,10 @@ clear_diff_services(list old_checkers_queue) /* perform blacklist address diff */ if (!clear_diff_blklst(vs)) return; + /* perform whitelist address diff */ + if (!clear_diff_whtlst(vs)) + return; + } } } diff --git a/tools/keepalived/keepalived/check/libipvs.c b/tools/keepalived/keepalived/check/libipvs.c index e3074a1bd..2d877efa0 100644 --- a/tools/keepalived/keepalived/check/libipvs.c +++ b/tools/keepalived/keepalived/check/libipvs.c @@ -432,7 +432,6 @@ static void ipvs_fill_blklst_conf(ipvs_service_t *svc, ipvs_blklst_t *blklst, return; } - int ipvs_add_blklst(ipvs_service_t *svc, ipvs_blklst_t *blklst) { struct dp_vs_blklst_conf conf; @@ -455,6 +454,48 @@ int ipvs_del_blklst(ipvs_service_t *svc, ipvs_blklst_t *blklst) return dpvs_setsockopt(SOCKOPT_SET_BLKLST_DEL, &conf, sizeof(conf)); } +/*for white list*/ +static void ipvs_fill_whtlst_conf(ipvs_service_t *svc, ipvs_whtlst_t *whtlst, + struct dp_vs_whtlst_conf *conf) +{ + memset(conf, 0, sizeof(*conf)); + conf->af = svc->af; + conf->proto = svc->user.protocol; + conf->vport = svc->user.port; + conf->fwmark = svc->user.fwmark; + if (svc->af == AF_INET) { + conf->vaddr.in = 
svc->nf_addr.in; + conf->whtlst.in = whtlst->addr.in; + } else { + conf->vaddr.in6 = svc->nf_addr.in6; + conf->whtlst.in6 = whtlst->addr.in6; + } + + return; +} + +int ipvs_add_whtlst(ipvs_service_t *svc, ipvs_whtlst_t *whtlst) +{ + struct dp_vs_whtlst_conf conf; + + ipvs_func = ipvs_add_whtlst; + + ipvs_fill_whtlst_conf(svc, whtlst, &conf); + + return dpvs_setsockopt(SOCKOPT_SET_WHTLST_ADD, &conf, sizeof(conf)); +} + +int ipvs_del_whtlst(ipvs_service_t *svc, ipvs_whtlst_t *whtlst) +{ + struct dp_vs_whtlst_conf conf; + + ipvs_func = ipvs_del_whtlst; + + ipvs_fill_whtlst_conf(svc, whtlst, &conf); + + return dpvs_setsockopt(SOCKOPT_SET_WHTLST_DEL, &conf, sizeof(conf)); +} + /* for tunnel entry */ static void ipvs_fill_tunnel_conf(ipvs_tunnel_t *tunnel_entry, struct ip_tunnel_param *conf) @@ -991,6 +1032,36 @@ struct dp_vs_blklst_conf_array *ipvs_get_blklsts(void) return array; } +struct dp_vs_whtlst_conf_array *ipvs_get_whtlsts(void) +{ + struct dp_vs_whtlst_conf_array *array, *result; + size_t size; + int i, err; + + ipvs_func = ipvs_get_whtlsts; + + err = dpvs_getsockopt(SOCKOPT_GET_WHTLST_GETALL, NULL, 0, + (void **)&result, &size); + if (err != 0) + return NULL; + if (size < sizeof(*result) + || size != sizeof(*result) + \ + result->naddr * sizeof(struct dp_vs_whtlst_conf)) { + dpvs_sockopt_msg_free(result); + return NULL; + } + if (!(array = malloc(size))) + return NULL; + memcpy(array, result, sizeof(struct dp_vs_whtlst_conf_array)); + for (i = 0; i < result->naddr; i++) { + memcpy(&array->whtlsts[i], &result->whtlsts[i], + sizeof(struct dp_vs_whtlst_conf)); + } + + dpvs_sockopt_msg_free(result); + return array; +} + void ipvs_free_service(ipvs_service_entry_t *p) { free(p); @@ -1028,6 +1099,11 @@ const char *ipvs_strerror(int err) { ipvs_del_blklst, ESRCH, "Service not defined" }, { ipvs_del_blklst, ENOENT, "No such deny address" }, { ipvs_get_blklsts, ESRCH, "Service not defined" }, + { ipvs_add_whtlst, ESRCH, "Service not defined" }, + { ipvs_add_whtlst, 
EEXIST, "whitelist address already exists" }, + { ipvs_del_whtlst, ESRCH, "Service not defined" }, + { ipvs_del_whtlst, ENOENT, "No such deny address" }, + { ipvs_get_whtlsts, ESRCH, "Service not defined" }, { ipvs_get_dests, ESRCH, "No such service" }, { ipvs_get_service, ESRCH, "No such service" }, { 0, EPERM, "Permission denied (you must be root)" }, diff --git a/tools/keepalived/keepalived/include/check_data.h b/tools/keepalived/keepalived/include/check_data.h index 32e2b2d23..e2b9b6d28 100644 --- a/tools/keepalived/keepalived/include/check_data.h +++ b/tools/keepalived/keepalived/include/check_data.h @@ -131,6 +131,18 @@ typedef struct _blklst_addr_group { list range; } blklst_addr_group; +/* whitelist ip group*/ +typedef struct _whtlst_addr_entry { + struct sockaddr_storage addr; + uint32_t range; +} whtlst_addr_entry; + +typedef struct _whtlst_addr_group { + char *gname; + list addr_ip; + list range; +} whtlst_addr_group; + typedef struct _tunnel_entry { struct sockaddr_storage remote; struct sockaddr_storage local; @@ -237,6 +249,7 @@ typedef struct _virtual_server { unsigned hash_target; char *local_addr_gname; /*local ip address group name*/ char *blklst_addr_gname; /*black list ip group name*/ + char *whtlst_addr_gname; /*white list ip group name*/ char *vip_bind_dev; /*the interface name, vip bindto*/ } virtual_server_t; @@ -253,6 +266,7 @@ typedef struct _check_data { unsigned num_smtp_alert; list laddr_group; list blklst_group; + list whtlst_group; list tunnel_group; } check_data_t; @@ -348,6 +362,9 @@ extern void set_rsgroup(char *); extern void dump_check_data(FILE *, check_data_t *); extern void alloc_blklst_group(char *); extern void alloc_blklst_entry(const vector_t *); +extern void alloc_whtlst_group(char *); +extern void alloc_whtlst_entry(const vector_t *); + extern void alloc_tunnel_entry(char *name); extern void alloc_tunnel(char *gname); diff --git a/tools/keepalived/keepalived/include/dp_vs.h b/tools/keepalived/keepalived/include/dp_vs.h 
index 76f999014..e81ecca62 100644 --- a/tools/keepalived/keepalived/include/dp_vs.h +++ b/tools/keepalived/keepalived/include/dp_vs.h @@ -25,6 +25,7 @@ #include "conf/inetaddr.h" #include "conf/laddr.h" #include "conf/blklst.h" +#include "conf/whtlst.h" #include "conf/conn.h" #include "conf/ip_tunnel.h" #include "conf/service.h" diff --git a/tools/keepalived/keepalived/include/ip_vs.h b/tools/keepalived/keepalived/include/ip_vs.h index 08cda3859..d43d55d87 100644 --- a/tools/keepalived/keepalived/include/ip_vs.h +++ b/tools/keepalived/keepalived/include/ip_vs.h @@ -85,7 +85,9 @@ #define IP_VS_SO_SET_DELBLKLST (IP_VS_BASE_CTL+19) #define IP_VS_SO_SET_ADDTUNNEL (IP_VS_BASE_CTL+20) #define IP_VS_SO_SET_DELTUNNEL (IP_VS_BASE_CTL+21) -#define IP_VS_SO_SET_MAX IP_VS_SO_SET_DELTUNNEL +#define IP_VS_SO_SET_ADDWHTLST (IP_VS_BASE_CTL+22) +#define IP_VS_SO_SET_DELWHTLST (IP_VS_BASE_CTL+23) +#define IP_VS_SO_SET_MAX IP_VS_SO_SET_DELWHTLST #define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL #define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) @@ -197,6 +199,12 @@ struct ip_vs_blklst_user { union nf_inet_addr addr; }; +struct ip_vs_whtlst_user { + __be32 __addr_v4; + u_int16_t af; + union nf_inet_addr addr; +}; + struct ip_vs_tunnel_user { char ifname[IFNAMSIZ]; char kind[TNLKINDSIZ]; diff --git a/tools/keepalived/keepalived/include/ipvswrapper.h b/tools/keepalived/keepalived/include/ipvswrapper.h index d03c76838..68134f49a 100644 --- a/tools/keepalived/keepalived/include/ipvswrapper.h +++ b/tools/keepalived/keepalived/include/ipvswrapper.h @@ -85,6 +85,8 @@ extern local_addr_group *ipvs_get_laddr_group_by_name(char *, list); extern int ipvs_laddr_remove_entry(virtual_server_t *, local_addr_entry *); extern blklst_addr_group *ipvs_get_blklst_group_by_name(char *, list); extern int ipvs_blklst_remove_entry(virtual_server_t *, blklst_addr_entry *); +extern whtlst_addr_group *ipvs_get_whtlst_group_by_name(char *, list); +extern int ipvs_whtlst_remove_entry(virtual_server_t *, whtlst_addr_entry *); 
extern int ipvs_tunnel_cmd(int cmd, tunnel_entry *entry); /* Refresh statistics at most every 5 seconds */ diff --git a/tools/keepalived/keepalived/include/ipwrapper.h b/tools/keepalived/keepalived/include/ipwrapper.h index 440858764..af81b9cfd 100755 --- a/tools/keepalived/keepalived/include/ipwrapper.h +++ b/tools/keepalived/keepalived/include/ipwrapper.h @@ -44,6 +44,8 @@ #define LVS_CMD_DEL_LADDR IP_VS_SO_SET_DELLADDR #define LVS_CMD_ADD_BLKLST IP_VS_SO_SET_ADDBLKLST #define LVS_CMD_DEL_BLKLST IP_VS_SO_SET_DELBLKLST +#define LVS_CMD_ADD_WHTLST IP_VS_SO_SET_ADDWHTLST +#define LVS_CMD_DEL_WHTLST IP_VS_SO_SET_DELWHTLST #define LVS_CMD_ADD_TUNNEL IP_VS_SO_SET_ADDTUNNEL #define LVS_CMD_DEL_TUNNEL IP_VS_SO_SET_DELTUNNEL diff --git a/tools/keepalived/keepalived/include/libipvs.h b/tools/keepalived/keepalived/include/libipvs.h index 7a169ff35..5f577564d 100644 --- a/tools/keepalived/keepalived/include/libipvs.h +++ b/tools/keepalived/keepalived/include/libipvs.h @@ -43,7 +43,8 @@ #define OPT_HASHTAG 0x10000000 #define OPT_CPU 0x20000000 #define OPT_EXPIRE_QUIESCENT_CONN 0x40000000 -#define NUMBER_OF_OPT 32 +#define OPT_WHTLST_ADDRESS 0x80000000 +#define NUMBER_OF_OPT 33 #define MINIMUM_IPVS_VERSION_MAJOR 1 #define MINIMUM_IPVS_VERSION_MINOR 1 @@ -72,9 +73,11 @@ typedef struct ip_vs_service_entry_app ipvs_service_entry_t; typedef struct ip_vs_dest_entry_app ipvs_dest_entry_t; typedef struct ip_vs_laddr_user ipvs_laddr_t; typedef struct ip_vs_blklst_user ipvs_blklst_t; +typedef struct ip_vs_whtlst_user ipvs_whtlst_t; typedef struct ip_vs_tunnel_user ipvs_tunnel_t; typedef struct ip_vs_laddr_entry ipvs_laddr_entry_t; typedef struct ip_vs_blklst_entry ipvs_blklst_entry_t; +typedef struct ip_vs_whtlst_entry ipvs_whtlst_entry_t; /* init socket and get ipvs info */ @@ -123,6 +126,10 @@ extern struct ip_vs_get_laddrs *ipvs_get_laddrs(ipvs_service_entry_t *svc, lcore extern int ipvs_add_blklst(ipvs_service_t *svc, ipvs_blklst_t * blklst); extern int 
ipvs_del_blklst(ipvs_service_t *svc, ipvs_blklst_t * blklst); +/*for add/delete a whitelist ip*/ +extern int ipvs_add_whtlst(ipvs_service_t *svc, ipvs_whtlst_t * whtlst); +extern int ipvs_del_whtlst(ipvs_service_t *svc, ipvs_whtlst_t * whtlst); + /*for add/delete a tunnel*/ extern int ipvs_add_tunnel(ipvs_tunnel_t * tunnel_entry); extern int ipvs_del_tunnel(ipvs_tunnel_t * tunnel_entry); @@ -178,4 +185,6 @@ extern void ipvs_sort_dests(struct ip_vs_get_dests_app *d, ipvs_dest_cmp_t f); +extern struct dp_vs_whtlst_conf_array *ipvs_get_whtlsts(void); + #endif /* _LIBIPVS_H */ From f47b55a2e817a630ea969e077ec8d66c822ccfd6 Mon Sep 17 00:00:00 2001 From: ytwang0320 Date: Tue, 1 Dec 2020 20:58:49 +0800 Subject: [PATCH 03/35] update whitelist --- tools/keepalived/keepalived/check/check_parser.c | 2 +- tools/keepalived/keepalived/check/libipvs.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/keepalived/keepalived/check/check_parser.c b/tools/keepalived/keepalived/check/check_parser.c index 85d5c4c54..d6464e843 100644 --- a/tools/keepalived/keepalived/check/check_parser.c +++ b/tools/keepalived/keepalived/check/check_parser.c @@ -1226,7 +1226,7 @@ init_check_keywords(bool active) install_sublevel_end(); install_keyword("laddr_group_name", &laddr_gname_handler); install_keyword("daddr_group_name", &blklst_gname_handler); - install_keyword("aaddr_group_name", &whtlst_gname_handler); + install_keyword("waddr_group_name", &whtlst_gname_handler); install_keyword("syn_proxy", &syn_proxy_handler); install_keyword("expire_quiescent_conn", &expire_quiescent_handler); install_keyword("vip_bind_dev", &bind_dev_handler); diff --git a/tools/keepalived/keepalived/check/libipvs.c b/tools/keepalived/keepalived/check/libipvs.c index 2d877efa0..e604b2a4a 100644 --- a/tools/keepalived/keepalived/check/libipvs.c +++ b/tools/keepalived/keepalived/check/libipvs.c @@ -1099,8 +1099,8 @@ const char *ipvs_strerror(int err) { ipvs_del_blklst, ESRCH, "Service not defined" 
}, { ipvs_del_blklst, ENOENT, "No such deny address" }, { ipvs_get_blklsts, ESRCH, "Service not defined" }, - { ipvs_add_whtlst, ESRCH, "Service not defined" }, - { ipvs_add_whtlst, EEXIST, "whitelist address already exists" }, + { ipvs_add_whtlst, ESRCH, "Service not defined" }, + { ipvs_add_whtlst, EEXIST, "whitelist address already exists" }, { ipvs_del_whtlst, ESRCH, "Service not defined" }, { ipvs_del_whtlst, ENOENT, "No such deny address" }, { ipvs_get_whtlsts, ESRCH, "Service not defined" }, From 03e496ff80a393c36f5857fb6828c718f36598a9 Mon Sep 17 00:00:00 2001 From: lixiaoxiao Date: Mon, 23 Nov 2020 19:47:02 +0800 Subject: [PATCH 04/35] Fix DEBUG level do not work with dpdk-stable-18.11.2, because dpdk-stable-18.11.2 rte_logs.dynamic_types default level is "INFO". --- src/global_conf.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/global_conf.c b/src/global_conf.c index ac4959220..1447e313a 100644 --- a/src/global_conf.c +++ b/src/global_conf.c @@ -33,9 +33,10 @@ static void log_current_time(void) static int set_log_level(char *log_level) { - if (!log_level) + if (!log_level) { rte_log_set_global_level(RTE_LOG_DEBUG); - else if (!strncmp(log_level, "EMERG", strlen("EMERG"))) + rte_log_set_level_regexp("user[0-9]", RTE_LOG_DEBUG); + } else if (!strncmp(log_level, "EMERG", strlen("EMERG"))) rte_log_set_global_level(RTE_LOG_EMERG); else if (!strncmp(log_level, "ALERT", strlen("ALERT"))) rte_log_set_global_level(RTE_LOG_ALERT); @@ -49,9 +50,10 @@ static int set_log_level(char *log_level) rte_log_set_global_level(RTE_LOG_NOTICE); else if (!strncmp(log_level, "INFO", strlen("INFO"))) rte_log_set_global_level(RTE_LOG_INFO); - else if (!strncmp(log_level, "DEBUG", strlen("DEBUG"))) + else if (!strncmp(log_level, "DEBUG", strlen("DEBUG"))) { rte_log_set_global_level(RTE_LOG_DEBUG); - else { + rte_log_set_level_regexp("user[0-9]", RTE_LOG_DEBUG); + } else { RTE_LOG(WARNING, CFG_FILE, "%s: illegal log level: %s\n", __func__, 
log_level); return EDPVS_INVAL; From 1ec2892bdf9e752ca3abd213a00f554605428180 Mon Sep 17 00:00:00 2001 From: ytwang0320 Date: Tue, 22 Dec 2020 16:11:11 +0800 Subject: [PATCH 05/35] added ipv6 white list --- include/ipvs/whtlst.h | 6 +- src/ipvs/ip_vs_blklst.c | 2 +- src/ipvs/ip_vs_core.c | 12 +- src/ipvs/ip_vs_proto_tcp.c | 2 +- src/ipvs/ip_vs_proto_udp.c | 4 +- src/ipvs/ip_vs_synproxy.c | 2 +- src/ipvs/ip_vs_whtlst.c | 132 ++++--- tools/ipvsadm/ipvsadm.c | 3 +- .../keepalived/keepalived/check/check_data.c | 339 +++++++++--------- .../keepalived/check/check_parser.c | 2 +- .../keepalived/keepalived/check/ipvswrapper.c | 15 +- tools/keepalived/keepalived/check/ipwrapper.c | 3 +- tools/keepalived/keepalived/check/libipvs.c | 4 +- .../keepalived/include/check_data.h | 2 +- tools/keepalived/keepalived/include/libipvs.h | 2 +- 15 files changed, 287 insertions(+), 243 deletions(-) diff --git a/include/ipvs/whtlst.h b/include/ipvs/whtlst.h index 007967ce6..2288d5f96 100644 --- a/include/ipvs/whtlst.h +++ b/include/ipvs/whtlst.h @@ -19,19 +19,19 @@ #define __DPVS_WHTLST_H__ #include "conf/common.h" #include "ipvs/service.h" -#include "timer.h" struct whtlst_entry { struct list_head list; + int af; union inet_addr vaddr; uint16_t vport; uint8_t proto; union inet_addr whtlst; }; -struct whtlst_entry *dp_vs_whtlst_lookup(uint8_t proto, const union inet_addr *vaddr, +struct whtlst_entry *dp_vs_whtlst_lookup(int af, uint8_t proto, const union inet_addr *vaddr, uint16_t vport, const union inet_addr *whtlst); -bool dp_vs_whtlst_allow(uint8_t proto, const union inet_addr *vaddr, +bool dp_vs_whtlst_allow(int af, uint8_t proto, const union inet_addr *vaddr, uint16_t vport, const union inet_addr *whtlst); void dp_vs_whtlst_flush(struct dp_vs_service *svc); diff --git a/src/ipvs/ip_vs_blklst.c b/src/ipvs/ip_vs_blklst.c index 376d5072d..9f85dee72 100644 --- a/src/ipvs/ip_vs_blklst.c +++ b/src/ipvs/ip_vs_blklst.c @@ -87,7 +87,7 @@ static int dp_vs_blklst_add_lcore(int af, uint8_t proto, 
const union inet_addr * hashkey = blklst_hashkey(vaddr, blklst); new = rte_zmalloc("new_blklst_entry", sizeof(struct blklst_entry), 0); - if (new == NULL) + if (unlikely(new == NULL)) return EDPVS_NOMEM; new->af = af; diff --git a/src/ipvs/ip_vs_core.c b/src/ipvs/ip_vs_core.c index 2763e553c..43c4b2749 100644 --- a/src/ipvs/ip_vs_core.c +++ b/src/ipvs/ip_vs_core.c @@ -1236,10 +1236,10 @@ int dp_vs_init(void) err_hooks: dp_vs_stats_term(); err_stats: - dp_vs_blklst_term(); -err_blklst: dp_vs_whtlst_term(); err_whtlst: + dp_vs_blklst_term(); +err_blklst: dp_vs_service_term(); err_serv: dp_vs_sched_term(); @@ -1269,14 +1269,14 @@ int dp_vs_term(void) if (err != EDPVS_OK) RTE_LOG(ERR, IPVS, "fail to terminate term: %s\n", dpvs_strerror(err)); - err = dp_vs_blklst_term(); - if (err != EDPVS_OK) - RTE_LOG(ERR, IPVS, "fail to terminate blklst: %s\n", dpvs_strerror(err)); - err = dp_vs_whtlst_term(); if (err != EDPVS_OK) RTE_LOG(ERR, IPVS, "fail to terminate whtlst: %s\n", dpvs_strerror(err)); + err = dp_vs_blklst_term(); + if (err != EDPVS_OK) + RTE_LOG(ERR, IPVS, "fail to terminate blklst: %s\n", dpvs_strerror(err)); + err = dp_vs_service_term(); if (err != EDPVS_OK) RTE_LOG(ERR, IPVS, "fail to terminate serv: %s\n", dpvs_strerror(err)); diff --git a/src/ipvs/ip_vs_proto_tcp.c b/src/ipvs/ip_vs_proto_tcp.c index ab8c6af21..70454070c 100644 --- a/src/ipvs/ip_vs_proto_tcp.c +++ b/src/ipvs/ip_vs_proto_tcp.c @@ -645,7 +645,7 @@ tcp_conn_lookup(struct dp_vs_proto *proto, const struct dp_vs_iphdr *iph, return NULL; } - if (!dp_vs_whtlst_allow(iph->proto, &iph->daddr, th->dest, &iph->saddr)) { + if (!dp_vs_whtlst_allow(iph->af, iph->proto, &iph->daddr, th->dest, &iph->saddr)) { *drop = true; return NULL; } diff --git a/src/ipvs/ip_vs_proto_udp.c b/src/ipvs/ip_vs_proto_udp.c index 1f77b53ab..82daec0be 100644 --- a/src/ipvs/ip_vs_proto_udp.c +++ b/src/ipvs/ip_vs_proto_udp.c @@ -213,8 +213,8 @@ udp_conn_lookup(struct dp_vs_proto *proto, return NULL; } - if 
(!dp_vs_whtlst_allow(iph->proto, &iph->daddr, uh->dst_port, - &iph->saddr)) { + if (!dp_vs_whtlst_allow(iph->af, iph->proto, &iph->daddr, + uh->dst_port, &iph->saddr)) { *drop = true; return NULL; } diff --git a/src/ipvs/ip_vs_synproxy.c b/src/ipvs/ip_vs_synproxy.c index fceb2b262..e280538a3 100644 --- a/src/ipvs/ip_vs_synproxy.c +++ b/src/ipvs/ip_vs_synproxy.c @@ -707,7 +707,7 @@ int dp_vs_synproxy_syn_rcv(int af, struct rte_mbuf *mbuf, } /* drop packet if not in whitelist */ - if (!dp_vs_whtlst_allow(iph->proto, &iph->daddr, th->dest, &iph->saddr)) { + if (!dp_vs_whtlst_allow(iph->af, iph->proto, &iph->daddr, th->dest, &iph->saddr)) { goto syn_rcv_out; } } else { diff --git a/src/ipvs/ip_vs_whtlst.c b/src/ipvs/ip_vs_whtlst.c index 5a2209ef8..1faa6eb71 100644 --- a/src/ipvs/ip_vs_whtlst.c +++ b/src/ipvs/ip_vs_whtlst.c @@ -56,7 +56,7 @@ static inline uint32_t whtlst_hashkey(const uint8_t proto, const union inet_addr + dp_vs_whtlst_rnd) & DPVS_WHTLST_TAB_MASK; } -struct whtlst_entry *dp_vs_whtlst_lookup(uint8_t proto, const union inet_addr *vaddr, +struct whtlst_entry *dp_vs_whtlst_lookup(int af, uint8_t proto, const union inet_addr *vaddr, uint16_t vport, const union inet_addr *whtlst) { unsigned hashkey; @@ -64,16 +64,16 @@ struct whtlst_entry *dp_vs_whtlst_lookup(uint8_t proto, const union inet_addr *v hashkey = whtlst_hashkey(proto, vaddr, vport); list_for_each_entry(whtlst_node, &this_whtlst_tab[hashkey], list){ - if (whtlst_node->vaddr.in.s_addr == vaddr->in.s_addr && - whtlst_node->whtlst.in.s_addr == whtlst->in.s_addr && - whtlst_node->proto == proto && - whtlst_node->vport == vport) + if (whtlst_node->af == af && whtlst_node->proto == proto && + whtlst_node->vport == vport && + inet_addr_equal(af, &whtlst_node->vaddr, vaddr) && + inet_addr_equal(af, &whtlst_node->whtlst, whtlst)) return whtlst_node; } return NULL; } -bool dp_vs_whtlst_allow(uint8_t proto, const union inet_addr *vaddr, +bool dp_vs_whtlst_allow(int af, uint8_t proto, const union inet_addr 
*vaddr, uint16_t vport, const union inet_addr *whtlst) { unsigned hashkey; @@ -84,24 +84,23 @@ bool dp_vs_whtlst_allow(uint8_t proto, const union inet_addr *vaddr, if (&this_whtlst_tab[hashkey] == NULL || list_empty(&this_whtlst_tab[hashkey])) { return true; } - list_for_each_entry(whtlst_node, &this_whtlst_tab[hashkey], list){ - if (whtlst_node->vaddr.in.s_addr == vaddr->in.s_addr && - whtlst_node->whtlst.in.s_addr == whtlst->in.s_addr && - whtlst_node->proto == proto && - whtlst_node->vport == vport) + if (whtlst_node->af == af && whtlst_node->proto == proto && + whtlst_node->vport == vport && + inet_addr_equal(af, &whtlst_node->vaddr, vaddr) && + inet_addr_equal(af, &whtlst_node->whtlst, whtlst)) return true; } return false; } -static int dp_vs_whtlst_add_lcore(uint8_t proto, const union inet_addr *vaddr, +static int dp_vs_whtlst_add_lcore(int af, uint8_t proto, const union inet_addr *vaddr, uint16_t vport, const union inet_addr *whtlst) { unsigned hashkey; struct whtlst_entry *new, *whtlst_node; - whtlst_node = dp_vs_whtlst_lookup(proto, vaddr, vport, whtlst); + whtlst_node = dp_vs_whtlst_lookup(af, proto, vaddr, vport, whtlst); if (whtlst_node) { return EDPVS_EXIST; } @@ -109,25 +108,26 @@ static int dp_vs_whtlst_add_lcore(uint8_t proto, const union inet_addr *vaddr, hashkey = whtlst_hashkey(proto, vaddr, vport); new = rte_zmalloc("new_whtlst_entry", sizeof(struct whtlst_entry), 0); - if (new == NULL) + if (unlikely(new == NULL)) return EDPVS_NOMEM; - memcpy(&new->vaddr, vaddr,sizeof(union inet_addr)); + new->af = af; new->vport = vport; new->proto = proto; - memcpy(&new->whtlst, whtlst,sizeof(union inet_addr)); + memcpy(&new->vaddr, vaddr, sizeof(union inet_addr)); + memcpy(&new->whtlst, whtlst, sizeof(union inet_addr)); list_add(&new->list, &this_whtlst_tab[hashkey]); rte_atomic32_inc(&this_num_whtlsts); return EDPVS_OK; } -static int dp_vs_whtlst_del_lcore(uint8_t proto, const union inet_addr *vaddr, +static int dp_vs_whtlst_del_lcore(int af, uint8_t proto, 
const union inet_addr *vaddr, uint16_t vport, const union inet_addr *whtlst) { struct whtlst_entry *whtlst_node; - whtlst_node = dp_vs_whtlst_lookup(proto, vaddr, vport, whtlst); + whtlst_node = dp_vs_whtlst_lookup(af, proto, vaddr, vport, whtlst); if (whtlst_node != NULL) { list_del(&whtlst_node->list); rte_free(whtlst_node); @@ -137,7 +137,13 @@ static int dp_vs_whtlst_del_lcore(uint8_t proto, const union inet_addr *vaddr, return EDPVS_NOTEXIST; } -static int dp_vs_whtlst_add(uint8_t proto, const union inet_addr *vaddr, +static uint32_t whtlst_msg_seq(void) +{ + static uint32_t counter = 0; + return counter++; +} + +static int dp_vs_whtlst_add(int af, uint8_t proto, const union inet_addr *vaddr, uint16_t vport, const union inet_addr *whtlst) { lcoreid_t cid = rte_lcore_id(); @@ -151,24 +157,25 @@ static int dp_vs_whtlst_add(uint8_t proto, const union inet_addr *vaddr, } memset(&cf, 0, sizeof(struct dp_vs_whtlst_conf)); - memcpy(&(cf.vaddr), vaddr,sizeof(union inet_addr)); + memcpy(&(cf.vaddr), vaddr, sizeof(union inet_addr)); memcpy(&(cf.whtlst), whtlst, sizeof(union inet_addr)); + cf.af = af; cf.vport = vport; cf.proto = proto; /*set whtlst ip on master lcore*/ - err = dp_vs_whtlst_add_lcore(proto, vaddr, vport, whtlst); + err = dp_vs_whtlst_add_lcore(af, proto, vaddr, vport, whtlst); if (err) { RTE_LOG(INFO, SERVICE, "[%s] fail to set whtlst ip\n", __func__); return err; } /*set whtlst ip on all slave lcores*/ - msg = msg_make(MSG_TYPE_WHTLST_ADD, 0, DPVS_MSG_MULTICAST, + msg = msg_make(MSG_TYPE_WHTLST_ADD, whtlst_msg_seq(), DPVS_MSG_MULTICAST, cid, sizeof(struct dp_vs_whtlst_conf), &cf); if (!msg) return EDPVS_NOMEM; - err = multicast_msg_send(msg, 0, NULL); + err = multicast_msg_send(msg, DPVS_MSG_F_ASYNC, NULL); if (err != EDPVS_OK) { msg_destroy(&msg); RTE_LOG(INFO, SERVICE, "[%s] fail to send multicast message\n", __func__); @@ -179,7 +186,7 @@ static int dp_vs_whtlst_add(uint8_t proto, const union inet_addr *vaddr, return EDPVS_OK; } -static int 
dp_vs_whtlst_del(uint8_t proto, const union inet_addr *vaddr, +static int dp_vs_whtlst_del(int af, uint8_t proto, const union inet_addr *vaddr, uint16_t vport, const union inet_addr *whtlst) { lcoreid_t cid = rte_lcore_id(); @@ -193,13 +200,14 @@ static int dp_vs_whtlst_del(uint8_t proto, const union inet_addr *vaddr, } memset(&cf, 0, sizeof(struct dp_vs_whtlst_conf)); - memcpy(&(cf.vaddr), vaddr,sizeof(union inet_addr)); + memcpy(&(cf.vaddr), vaddr, sizeof(union inet_addr)); memcpy(&(cf.whtlst), whtlst, sizeof(union inet_addr)); + cf.af = af; cf.vport = vport; cf.proto = proto; /*del whtlst ip on master lcores*/ - err = dp_vs_whtlst_del_lcore(proto, vaddr, vport, whtlst); + err = dp_vs_whtlst_del_lcore(af, proto, vaddr, vport, whtlst); if (err) { RTE_LOG(INFO, SERVICE, "[%s] fail to del whtlst ip\n", __func__); return err; @@ -210,7 +218,7 @@ static int dp_vs_whtlst_del(uint8_t proto, const union inet_addr *vaddr, cid, sizeof(struct dp_vs_whtlst_conf), &cf); if (!msg) return EDPVS_NOMEM; - err = multicast_msg_send(msg, 0, NULL); + err = multicast_msg_send(msg, DPVS_MSG_F_ASYNC, NULL); if (err != EDPVS_OK) { RTE_LOG(INFO, SERVICE, "[%s] fail to send multicast message\n", __func__); return err; @@ -227,8 +235,8 @@ void dp_vs_whtlst_flush(struct dp_vs_service *svc) for (hash = 0; hash < DPVS_WHTLST_TAB_SIZE; hash++) { list_for_each_entry_safe(entry, next, &this_whtlst_tab[hash], list) { - if (entry->vaddr.in.s_addr == svc->addr.in.s_addr) - dp_vs_whtlst_del(entry->proto, &entry->vaddr, + if (entry->af == svc->af && inet_addr_equal(svc->af, &entry->vaddr, &svc->addr)) + dp_vs_whtlst_del(svc->af, entry->proto, &entry->vaddr, entry->vport, &entry->whtlst); } } @@ -242,7 +250,7 @@ static void dp_vs_whtlst_flush_all(void) for (hash = 0; hash < DPVS_WHTLST_TAB_SIZE; hash++) { list_for_each_entry_safe(entry, next, &this_whtlst_tab[hash], list) { - dp_vs_whtlst_del(entry->proto, &entry->vaddr, + dp_vs_whtlst_del(entry->af, entry->proto, &entry->vaddr, entry->vport, 
&entry->whtlst); } } @@ -262,11 +270,13 @@ static int whtlst_sockopt_set(sockoptid_t opt, const void *conf, size_t size) switch (opt) { case SOCKOPT_SET_WHTLST_ADD: - err = dp_vs_whtlst_add(whtlst_conf->proto, &whtlst_conf->vaddr, + err = dp_vs_whtlst_add(whtlst_conf->af, + whtlst_conf->proto, &whtlst_conf->vaddr, whtlst_conf->vport, &whtlst_conf->whtlst); break; case SOCKOPT_SET_WHTLST_DEL: - err = dp_vs_whtlst_del(whtlst_conf->proto, &whtlst_conf->vaddr, + err = dp_vs_whtlst_del(whtlst_conf->af, + whtlst_conf->proto, &whtlst_conf->vaddr, whtlst_conf->vport, &whtlst_conf->whtlst); break; default: @@ -277,11 +287,11 @@ static int whtlst_sockopt_set(sockoptid_t opt, const void *conf, size_t size) return err; } -static void whtlst_fill_conf(int af, struct dp_vs_whtlst_conf *cf, +static void whtlst_fill_conf(struct dp_vs_whtlst_conf *cf, const struct whtlst_entry *entry) { memset(cf, 0 ,sizeof(*cf)); - cf->af = af; + cf->af = entry->af; cf->vaddr = entry->vaddr; cf->whtlst = entry->whtlst; cf->proto = entry->proto; @@ -299,7 +309,7 @@ static int whtlst_sockopt_get(sockoptid_t opt, const void *conf, size_t size, naddr = rte_atomic32_read(&this_num_whtlsts); *outsize = sizeof(struct dp_vs_whtlst_conf_array) + naddr * sizeof(struct dp_vs_whtlst_conf); - *out = rte_calloc_socket(NULL, 1, *outsize, 0, rte_socket_id()); + *out = rte_calloc(NULL, 1, *outsize, 0); if (!(*out)) return EDPVS_NOMEM; array = *out; @@ -309,7 +319,7 @@ static int whtlst_sockopt_get(sockoptid_t opt, const void *conf, size_t size, list_for_each_entry(entry, &this_whtlst_tab[hash], list) { if (off >= naddr) break; - whtlst_fill_conf(AF_INET, &array->whtlsts[off++], entry); + whtlst_fill_conf(&array->whtlsts[off++], entry); } } @@ -323,16 +333,16 @@ static int whtlst_msg_process(bool add, struct dpvs_msg *msg) int err; assert(msg); - if (msg->len != sizeof(struct dp_vs_whtlst_conf)){ + if (msg->len != sizeof(struct dp_vs_whtlst_conf)) { RTE_LOG(ERR, SERVICE, "%s: bad message.\n", __func__); return 
EDPVS_INVAL; } cf = (struct dp_vs_whtlst_conf *)msg->data; if (add) - err = dp_vs_whtlst_add_lcore(cf->proto, &cf->vaddr, cf->vport, &cf->whtlst); + err = dp_vs_whtlst_add_lcore(cf->af, cf->proto, &cf->vaddr, cf->vport, &cf->whtlst); else - err = dp_vs_whtlst_del_lcore(cf->proto, &cf->vaddr, cf->vport, &cf->whtlst); + err = dp_vs_whtlst_del_lcore(cf->af, cf->proto, &cf->vaddr, cf->vport, &cf->whtlst); if (err != EDPVS_OK) RTE_LOG(ERR, SERVICE, "%s: fail to %s whtlst: %s.\n", __func__, add ? "add" : "del", dpvs_strerror(err)); @@ -364,10 +374,10 @@ static int whtlst_lcore_init(void *args) { int i; if (!rte_lcore_is_enabled(rte_lcore_id())) - return EDPVS_DISABLED; - this_whtlst_tab = rte_malloc_socket(NULL, + return EDPVS_DISABLED; + this_whtlst_tab = rte_malloc(NULL, sizeof(struct list_head) * DPVS_WHTLST_TAB_SIZE, - RTE_CACHE_LINE_SIZE, rte_socket_id()); + RTE_CACHE_LINE_SIZE); if (!this_whtlst_tab) return EDPVS_NOMEM; @@ -391,6 +401,37 @@ static int whtlst_lcore_term(void *args) return EDPVS_OK; } +static int whtlst_unregister_msg_cb(void) +{ + struct dpvs_msg_type msg_type; + int err; + + memset(&msg_type, 0, sizeof(struct dpvs_msg_type)); + msg_type.type = MSG_TYPE_WHTLST_ADD; + msg_type.mode = DPVS_MSG_MULTICAST; + msg_type.prio = MSG_PRIO_NORM; + msg_type.cid = rte_lcore_id(); + msg_type.unicast_msg_cb = whtlst_add_msg_cb; + err = msg_type_mc_unregister(&msg_type); + if (err != EDPVS_OK) { + RTE_LOG(ERR, SERVICE, "%s: fail to unregister msg.\n", __func__); + return err; + } + + memset(&msg_type, 0, sizeof(struct dpvs_msg_type)); + msg_type.type = MSG_TYPE_WHTLST_DEL; + msg_type.mode = DPVS_MSG_MULTICAST; + msg_type.prio = MSG_PRIO_NORM; + msg_type.cid = rte_lcore_id(); + msg_type.unicast_msg_cb = whtlst_del_msg_cb; + err = msg_type_mc_unregister(&msg_type); + if (err != EDPVS_OK) { + RTE_LOG(ERR, SERVICE, "%s: fail to unregister msg.\n", __func__); + return err; + } + return EDPVS_OK; +} + int dp_vs_whtlst_init(void) { int err; @@ -432,8 +473,10 @@ int 
dp_vs_whtlst_init(void) return err; } - if ((err = sockopt_register(&whtlst_sockopts)) != EDPVS_OK) + if ((err = sockopt_register(&whtlst_sockopts)) != EDPVS_OK) { + whtlst_unregister_msg_cb(); return err; + } dp_vs_whtlst_rnd = (uint32_t)random(); return EDPVS_OK; @@ -444,6 +487,9 @@ int dp_vs_whtlst_term(void) int err; lcoreid_t cid; + if ((err = whtlst_unregister_msg_cb()) != EDPVS_OK) + return err; + if ((err = sockopt_unregister(&whtlst_sockopts)) != EDPVS_OK) return err; diff --git a/tools/ipvsadm/ipvsadm.c b/tools/ipvsadm/ipvsadm.c index 1d3383140..6f74782ce 100644 --- a/tools/ipvsadm/ipvsadm.c +++ b/tools/ipvsadm/ipvsadm.c @@ -202,14 +202,13 @@ static const char* optnames[] = { "pe" , "local-address" , "blklst-address", - "whtlst-address", "synproxy" , "ifname" , "sockpair" , "hash-target", "cpu", "expire-quiescent", - "wlst", + "whtlst-address", }; /* diff --git a/tools/keepalived/keepalived/check/check_data.c b/tools/keepalived/keepalived/check/check_data.c index fe2d3ee6e..f08b53df0 100644 --- a/tools/keepalived/keepalived/check/check_data.c +++ b/tools/keepalived/keepalived/check/check_data.c @@ -158,8 +158,8 @@ alloc_whtlst_entry(const vector_t *strvec) new = (whtlst_addr_entry *) MALLOC(sizeof (whtlst_addr_entry)); inet_stor(vector_slot(strvec, 0), &new->range); - if (new->range == UINT32_MAX) - new->range = 0; + if (new->range == UINT32_MAX) + new->range = 0; inet_stosockaddr(vector_slot(strvec, 0), NULL, &new->addr); if (!new->range) @@ -174,231 +174,230 @@ alloc_whtlst_entry(const vector_t *strvec) static void free_vsg(void *data) { - virtual_server_group_t *vsg = data; - FREE_PTR(vsg->gname); - free_list(&vsg->addr_range); - free_list(&vsg->vfwmark); - FREE(vsg); + virtual_server_group_t *vsg = data; + FREE_PTR(vsg->gname); + free_list(&vsg->addr_range); + free_list(&vsg->vfwmark); + FREE(vsg); } static void dump_vsg(FILE *fp, const void *data) { - const virtual_server_group_t *vsg = data; + const virtual_server_group_t *vsg = data; - 
conf_write(fp, " ------< Virtual server group >------"); - conf_write(fp, " Virtual Server Group = %s", vsg->gname); - dump_list(fp, vsg->addr_range); - dump_list(fp, vsg->vfwmark); + conf_write(fp, " ------< Virtual server group >------"); + conf_write(fp, " Virtual Server Group = %s", vsg->gname); + dump_list(fp, vsg->addr_range); + dump_list(fp, vsg->vfwmark); } static void free_vsg_entry(void *data) { - FREE(data); + FREE(data); } static void dump_vsg_entry(FILE *fp, const void *data) { - const virtual_server_group_entry_t *vsg_entry = data; - uint16_t start; - - if (vsg_entry->is_fwmark) { - conf_write(fp, " FWMARK = %u", vsg_entry->vfwmark); - conf_write(fp, " Alive: %u IPv4, %u IPv6", - vsg_entry->fwm4_alive, vsg_entry->fwm6_alive); - } else { - if (vsg_entry->range) { - start = vsg_entry->addr.ss_family == AF_INET ? - ntohl(((const struct sockaddr_in*)&vsg_entry->addr)->sin_addr.s_addr) & 0xFF : - ntohs(((const struct sockaddr_in6*)&vsg_entry->addr)->sin6_addr.s6_addr16[7]); - conf_write(fp, - vsg_entry->addr.ss_family == AF_INET ? - " VIP Range = %s-%u, VPORT = %d" : - " VIP Range = %s-%x, VPORT = %d", - inet_sockaddrtos(&vsg_entry->addr), - start + vsg_entry->range, - ntohs(inet_sockaddrport(&vsg_entry->addr))); - } else - conf_write(fp, " VIP = %s, VPORT = %d" - , inet_sockaddrtos(&vsg_entry->addr) - , ntohs(inet_sockaddrport(&vsg_entry->addr))); - conf_write(fp, " Alive: %u tcp, %u udp, %u sctp", - vsg_entry->tcp_alive, vsg_entry->udp_alive, vsg_entry->sctp_alive); - } - conf_write(fp, " reloaded = %s", vsg_entry->reloaded ? "True" : "False"); + const virtual_server_group_entry_t *vsg_entry = data; + uint16_t start; + + if (vsg_entry->is_fwmark) { + conf_write(fp, " FWMARK = %u", vsg_entry->vfwmark); + conf_write(fp, " Alive: %u IPv4, %u IPv6", + vsg_entry->fwm4_alive, vsg_entry->fwm6_alive); + } else { + if (vsg_entry->range) { + start = vsg_entry->addr.ss_family == AF_INET ? 
+ ntohl(((const struct sockaddr_in*)&vsg_entry->addr)->sin_addr.s_addr) & 0xFF : + ntohs(((const struct sockaddr_in6*)&vsg_entry->addr)->sin6_addr.s6_addr16[7]); + conf_write(fp, + vsg_entry->addr.ss_family == AF_INET ? + " VIP Range = %s-%u, VPORT = %d" : + " VIP Range = %s-%x, VPORT = %d", + inet_sockaddrtos(&vsg_entry->addr), + start + vsg_entry->range, + ntohs(inet_sockaddrport(&vsg_entry->addr))); + } else + conf_write(fp, " VIP = %s, VPORT = %d" + , inet_sockaddrtos(&vsg_entry->addr) + , ntohs(inet_sockaddrport(&vsg_entry->addr))); + + conf_write(fp, " Alive: %u tcp, %u udp, %u sctp", + vsg_entry->tcp_alive, vsg_entry->udp_alive, vsg_entry->sctp_alive); + } + conf_write(fp, " reloaded = %s", vsg_entry->reloaded ? "True" : "False"); } void alloc_vsg(const char *gname) { - virtual_server_group_t *new; + virtual_server_group_t *new; - new = (virtual_server_group_t *) MALLOC(sizeof(virtual_server_group_t)); - new->gname = STRDUP(gname); - new->addr_range = alloc_list(free_vsg_entry, dump_vsg_entry); - new->vfwmark = alloc_list(free_vsg_entry, dump_vsg_entry); + new = (virtual_server_group_t *) MALLOC(sizeof(virtual_server_group_t)); + new->gname = STRDUP(gname); + new->addr_range = alloc_list(free_vsg_entry, dump_vsg_entry); + new->vfwmark = alloc_list(free_vsg_entry, dump_vsg_entry); - list_add(check_data->vs_group, new); + list_add(check_data->vs_group, new); } void alloc_vsg_entry(const vector_t *strvec) { - virtual_server_group_t *vsg = LIST_TAIL_DATA(check_data->vs_group); - virtual_server_group_entry_t *new; - virtual_server_group_entry_t *old; - uint32_t start; - element e; - const char *port_str; - uint32_t range; - unsigned fwmark; - - new = (virtual_server_group_entry_t *) MALLOC(sizeof(virtual_server_group_entry_t)); - - if (!strcmp(strvec_slot(strvec, 0), "fwmark")) { - if (!read_unsigned_strvec(strvec, 1, &fwmark, 0, UINT32_MAX, true)) { - report_config_error(CONFIG_GENERAL_ERROR, "(%s): fwmark '%s' must be in [0, %u] - ignoring", vsg->gname, 
strvec_slot(strvec, 1), UINT32_MAX); - FREE(new); - return; - } - new->vfwmark = fwmark; - new->is_fwmark = true; - list_add(vsg->vfwmark, new); - } else { - if (!inet_stor(strvec_slot(strvec, 0), &range)) { - FREE(new); - return; - } - new->range = (uint32_t)range; - - if (vector_size(strvec) >= 2) { - /* Don't pass a port number of 0. This was added v2.0.7 to support legacy - * configuration since previously having no port wasn't allowed. */ - port_str = strvec_slot(strvec, 1); - if (!port_str[strspn(port_str, "0")]) - port_str = NULL; - } - else - port_str = NULL; - - if (inet_stosockaddr(strvec_slot(strvec, 0), port_str, &new->addr)) { - report_config_error(CONFIG_GENERAL_ERROR, "Invalid virtual server group IP address%s %s%s%s - skipping", strvec_slot(strvec, 0), - port_str ? "/port" : "", port_str ? "/" : "", port_str ? port_str : ""); - FREE(new); - return; - } + virtual_server_group_t *vsg = LIST_TAIL_DATA(check_data->vs_group); + virtual_server_group_entry_t *new; + virtual_server_group_entry_t *old; + uint32_t start; + element e; + const char *port_str; + uint32_t range; + unsigned fwmark; + + new = (virtual_server_group_entry_t *) MALLOC(sizeof(virtual_server_group_entry_t)); + + if (!strcmp(strvec_slot(strvec, 0), "fwmark")) { + if (!read_unsigned_strvec(strvec, 1, &fwmark, 0, UINT32_MAX, true)) { + report_config_error(CONFIG_GENERAL_ERROR, "(%s): fwmark '%s' must be in [0, %u] - ignoring", vsg->gname, strvec_slot(strvec, 1), UINT32_MAX); + FREE(new); + return; + } + new->vfwmark = fwmark; + new->is_fwmark = true; + list_add(vsg->vfwmark, new); + } else { + if (!inet_stor(strvec_slot(strvec, 0), &range)) { + FREE(new); + return; + } + new->range = (uint32_t)range; + + if (vector_size(strvec) >= 2) { + /* Don't pass a port number of 0. This was added v2.0.7 to support legacy + * configuration since previously having no port wasn't allowed. 
*/ + port_str = strvec_slot(strvec, 1); + if (!port_str[strspn(port_str, "0")]) + port_str = NULL; + } + else + port_str = NULL; + + if (inet_stosockaddr(strvec_slot(strvec, 0), port_str, &new->addr)) { + report_config_error(CONFIG_GENERAL_ERROR, "Invalid virtual server group IP address%s %s%s%s - skipping", strvec_slot(strvec, 0), + port_str ? "/port" : "", port_str ? "/" : "", port_str ? port_str : ""); + FREE(new); + return; + } #ifndef LIBIPVS_USE_NL - if (new->addr.ss_family != AF_INET) { - report_config_error(CONFIG_GENERAL_ERROR, "IPVS does not support IPv6 in this build - skipping %s", strvec_slot(strvec, 0)); - FREE(new); - return; - } + if (new->addr.ss_family != AF_INET) { + report_config_error(CONFIG_GENERAL_ERROR, "IPVS does not support IPv6 in this build - skipping %s", strvec_slot(strvec, 0)); + FREE(new); + return; + } #endif - - /* Ensure the address family matches any previously configured addresses */ - if (!LIST_ISEMPTY(vsg->addr_range)) { - e = LIST_HEAD(vsg->addr_range); - old = ELEMENT_DATA(e); - if (old->addr.ss_family != new->addr.ss_family) { - report_config_error(CONFIG_GENERAL_ERROR, "Cannot mix IPv4 and IPv6 in virtual server group - %s", vsg->gname); - FREE(new); - return; - } - } - - /* If no range specified, new->range == UINT32_MAX */ - if (new->range == UINT32_MAX) - new->range = 0; - else { - if (new->addr.ss_family == AF_INET) - start = ntohl(((struct sockaddr_in *)&new->addr)->sin_addr.s_addr) & 0xFF; - else - start = ntohs(((struct sockaddr_in6 *)&new->addr)->sin6_addr.s6_addr16[7]); - - if (start >= new->range) { - report_config_error(CONFIG_GENERAL_ERROR, "Address range end is not greater than address range start - %s - skipping", strvec_slot(strvec, 0)); - FREE(new); - return; - } - new->range -= start; - } - - new->is_fwmark = false; - list_add(vsg->addr_range, new); - } + /* Ensure the address family matches any previously configured addresses */ + if (!LIST_ISEMPTY(vsg->addr_range)) { + e = LIST_HEAD(vsg->addr_range); + 
old = ELEMENT_DATA(e); + if (old->addr.ss_family != new->addr.ss_family) { + report_config_error(CONFIG_GENERAL_ERROR, "Cannot mix IPv4 and IPv6 in virtual server group - %s", vsg->gname); + FREE(new); + return; + } + } + + /* If no range specified, new->range == UINT32_MAX */ + if (new->range == UINT32_MAX) + new->range = 0; + else { + if (new->addr.ss_family == AF_INET) + start = ntohl(((struct sockaddr_in *)&new->addr)->sin_addr.s_addr) & 0xFF; + else + start = ntohs(((struct sockaddr_in6 *)&new->addr)->sin6_addr.s6_addr16[7]); + + if (start >= new->range) { + report_config_error(CONFIG_GENERAL_ERROR, "Address range end is not greater than address range start - %s - skipping", strvec_slot(strvec, 0)); + FREE(new); + return; + } + new->range -= start; + } + new->is_fwmark = false; + list_add(vsg->addr_range, new); + } } /* Virtual server facility functions */ static void free_vs(void *data) { - virtual_server_t *vs = data; - FREE_CONST_PTR(vs->vsgname); - FREE_CONST_PTR(vs->virtualhost); - FREE_PTR(vs->s_svr); - free_list(&vs->rs); - free_notify_script(&vs->notify_quorum_up); - free_notify_script(&vs->notify_quorum_down); - FREE_PTR(vs->local_addr_gname); - FREE_PTR(vs->blklst_addr_gname); + virtual_server_t *vs = data; + FREE_CONST_PTR(vs->vsgname); + FREE_CONST_PTR(vs->virtualhost); + FREE_PTR(vs->s_svr); + free_list(&vs->rs); + free_notify_script(&vs->notify_quorum_up); + free_notify_script(&vs->notify_quorum_down); + FREE_PTR(vs->local_addr_gname); + FREE_PTR(vs->blklst_addr_gname); FREE_PTR(vs->whtlst_addr_gname); - FREE_PTR(vs->vip_bind_dev); - FREE(vs); + FREE_PTR(vs->vip_bind_dev); + FREE(vs); } static void dump_forwarding_method(FILE *fp, const char *prefix, const real_server_t *rs) { - const char *fwd_method = "forwarding method = "; + const char *fwd_method = "forwarding method = "; #ifdef _HAVE_IPVS_TUN_TYPE_ - const char *csum_str = ""; - const char *tun_type = "TUN, type = "; + const char *csum_str = ""; + const char *tun_type = "TUN, type = "; 
#endif - switch (rs->forwarding_method) { - case IP_VS_CONN_F_MASQ: - conf_write(fp, " %s%sNAT", prefix, fwd_method); - break; - case IP_VS_CONN_F_DROUTE: - conf_write(fp, " %s%sDR", prefix, fwd_method); - break; - case IP_VS_CONN_F_TUNNEL: + switch (rs->forwarding_method) { + case IP_VS_CONN_F_MASQ: + conf_write(fp, " %s%sNAT", prefix, fwd_method); + break; + case IP_VS_CONN_F_DROUTE: + conf_write(fp, " %s%sDR", prefix, fwd_method); + break; + case IP_VS_CONN_F_TUNNEL: #ifdef _HAVE_IPVS_TUN_TYPE_ - if (rs->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_IPIP) - conf_write(fp, " %s%s%sIPIP", prefix, fwd_method, tun_type); - else { + if (rs->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_IPIP) + conf_write(fp, " %s%s%sIPIP", prefix, fwd_method, tun_type); + else { #ifdef _HAVE_IPVS_TUN_CSUM_ - csum_str = rs->tun_flags == IP_VS_TUNNEL_ENCAP_FLAG_NOCSUM ? ", no checksum" : - rs->tun_flags == IP_VS_TUNNEL_ENCAP_FLAG_CSUM ? ", checksum" : - rs->tun_flags == IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM ? ", remote checksum" : - ", unknown checksum type"; + csum_str = rs->tun_flags == IP_VS_TUNNEL_ENCAP_FLAG_NOCSUM ? ", no checksum" : + rs->tun_flags == IP_VS_TUNNEL_ENCAP_FLAG_CSUM ? ", checksum" : + rs->tun_flags == IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM ? 
", remote checksum" : + ", unknown checksum type"; #endif - if (rs->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) - conf_write(fp, " %s%sGUE, port = %u%s", fwd_method, tun_type, ntohs(rs->tun_port), csum_str); + if (rs->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) + conf_write(fp, " %s%sGUE, port = %u%s", fwd_method, tun_type, ntohs(rs->tun_port), csum_str); #ifdef _HAVE_IPVS_TUN_GRE_ - else if (rs->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) - conf_write(fp, " %s%sGRE%s", fwd_method, tun_type, csum_str); + else if (rs->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) + conf_write(fp, " %s%sGRE%s", fwd_method, tun_type, csum_str); #endif - } + } #else - conf_write(fp, " %s%sTUN", prefix, fwd_method); + conf_write(fp, " %s%sTUN", prefix, fwd_method); #endif - break; - case IP_VS_CONN_F_FULLNAT: - conf_write(fp, "default forwarding method = FNAT"); - break; - case IP_VS_CONN_F_SNAT: - conf_write(fp, "default forwarding method = SNAT"); - break; - } + break; + case IP_VS_CONN_F_FULLNAT: + conf_write(fp, "default forwarding method = FNAT"); + break; + case IP_VS_CONN_F_SNAT: + conf_write(fp, "default forwarding method = SNAT"); + break; + } } static void dump_vs(FILE *fp, const void *data) { - const virtual_server_t *vs = data; + const virtual_server_t *vs = data; conf_write(fp, " ------< Virtual server >------"); if (vs->vsgname) diff --git a/tools/keepalived/keepalived/check/check_parser.c b/tools/keepalived/keepalived/check/check_parser.c index d6464e843..0ec32a225 100644 --- a/tools/keepalived/keepalived/check/check_parser.c +++ b/tools/keepalived/keepalived/check/check_parser.c @@ -1226,7 +1226,7 @@ init_check_keywords(bool active) install_sublevel_end(); install_keyword("laddr_group_name", &laddr_gname_handler); install_keyword("daddr_group_name", &blklst_gname_handler); - install_keyword("waddr_group_name", &whtlst_gname_handler); + install_keyword("waddr_group_name", &whtlst_gname_handler); install_keyword("syn_proxy", &syn_proxy_handler); 
install_keyword("expire_quiescent_conn", &expire_quiescent_handler); install_keyword("vip_bind_dev", &bind_dev_handler); diff --git a/tools/keepalived/keepalived/check/ipvswrapper.c b/tools/keepalived/keepalived/check/ipvswrapper.c index fa601ea28..0f9d41327 100755 --- a/tools/keepalived/keepalived/check/ipvswrapper.c +++ b/tools/keepalived/keepalived/check/ipvswrapper.c @@ -1275,13 +1275,14 @@ ipvs_rm_bentry_from_vsg(blklst_addr_entry *blklst_entry, whtlst_addr_entry *whtl if (whtlst_entry->range) ipvs_whtlst_range_cmd(IP_VS_SO_SET_DELWHTLST, whtlst_entry, srule); else { - memset(&whtlst_rule, 0, sizeof(ipvs_whtlst_t)); - whtlst_rule.af = whtlst_entry->addr.ss_family; - if (whtlst_entry->addr.ss_family == AF_INET6) - inet_sockaddrip6(&whtlst_entry->addr, &whtlst_rule.addr.in6); - else - whtlst_rule.addr.ip = inet_sockaddrip4(&whtlst_entry->addr); - ipvs_talk(IP_VS_SO_SET_DELWHTLST, srule, NULL, NULL, NULL, NULL, &whtlst_rule, NULL, false); + memset(&whtlst_rule, 0, sizeof(ipvs_whtlst_t)); + whtlst_rule.af = whtlst_entry->addr.ss_family; + if (whtlst_entry->addr.ss_family == AF_INET6) + inet_sockaddrip6(&whtlst_entry->addr, &whtlst_rule.addr.in6); + else + whtlst_rule.addr.ip = inet_sockaddrip4(&whtlst_entry->addr); + + ipvs_talk(IP_VS_SO_SET_DELWHTLST, srule, NULL, NULL, NULL, NULL, &whtlst_rule, NULL, false); } } continue; diff --git a/tools/keepalived/keepalived/check/ipwrapper.c b/tools/keepalived/keepalived/check/ipwrapper.c index ebaa03351..5d74ec2f1 100755 --- a/tools/keepalived/keepalived/check/ipwrapper.c +++ b/tools/keepalived/keepalived/check/ipwrapper.c @@ -1188,7 +1188,6 @@ clear_diff_whtlst(virtual_server_t * old_vs) { whtlst_addr_group *old; whtlst_addr_group *new; - /* * If old vs didn't own whitelist address group, * then do nothing and return @@ -1266,7 +1265,7 @@ clear_diff_services(list old_checkers_queue) /* perform blacklist address diff */ if (!clear_diff_blklst(vs)) return; - /* perform whitelist address diff */ + /* perform whitelist 
address diff */ if (!clear_diff_whtlst(vs)) return; diff --git a/tools/keepalived/keepalived/check/libipvs.c b/tools/keepalived/keepalived/check/libipvs.c index e604b2a4a..a5105d9ec 100644 --- a/tools/keepalived/keepalived/check/libipvs.c +++ b/tools/keepalived/keepalived/check/libipvs.c @@ -1099,8 +1099,8 @@ const char *ipvs_strerror(int err) { ipvs_del_blklst, ESRCH, "Service not defined" }, { ipvs_del_blklst, ENOENT, "No such deny address" }, { ipvs_get_blklsts, ESRCH, "Service not defined" }, - { ipvs_add_whtlst, ESRCH, "Service not defined" }, - { ipvs_add_whtlst, EEXIST, "whitelist address already exists" }, + { ipvs_add_whtlst, ESRCH, "Service not defined" }, + { ipvs_add_whtlst, EEXIST, "whitelist address already exists" }, { ipvs_del_whtlst, ESRCH, "Service not defined" }, { ipvs_del_whtlst, ENOENT, "No such deny address" }, { ipvs_get_whtlsts, ESRCH, "Service not defined" }, diff --git a/tools/keepalived/keepalived/include/check_data.h b/tools/keepalived/keepalived/include/check_data.h index e2b9b6d28..1d90dc320 100644 --- a/tools/keepalived/keepalived/include/check_data.h +++ b/tools/keepalived/keepalived/include/check_data.h @@ -266,7 +266,7 @@ typedef struct _check_data { unsigned num_smtp_alert; list laddr_group; list blklst_group; - list whtlst_group; + list whtlst_group; list tunnel_group; } check_data_t; diff --git a/tools/keepalived/keepalived/include/libipvs.h b/tools/keepalived/keepalived/include/libipvs.h index 5f577564d..0e0b3cdc1 100644 --- a/tools/keepalived/keepalived/include/libipvs.h +++ b/tools/keepalived/keepalived/include/libipvs.h @@ -43,7 +43,7 @@ #define OPT_HASHTAG 0x10000000 #define OPT_CPU 0x20000000 #define OPT_EXPIRE_QUIESCENT_CONN 0x40000000 -#define OPT_WHTLST_ADDRESS 0x80000000 +#define OPT_WHTLST_ADDRESS 0x80000000 #define NUMBER_OF_OPT 33 #define MINIMUM_IPVS_VERSION_MAJOR 1 From 4aecd356892a869330cc5ebe3c7e7154b6096f1a Mon Sep 17 00:00:00 2001 From: Chion Deng Date: Tue, 5 Jan 2021 15:24:33 +0800 Subject: [PATCH 06/35] 
fix: UDP checksum not correct if IP options present --- include/ipv4.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/ipv4.h b/include/ipv4.h index 055fdb50f..994ced020 100644 --- a/include/ipv4.h +++ b/include/ipv4.h @@ -187,12 +187,15 @@ static inline uint16_t ip4_udptcp_cksum(struct ipv4_hdr *iph, const void *l4_hdr { uint16_t csum; uint16_t total_length = iph->total_length; + uint8_t version_ihl = iph->version_ihl; iph->total_length = htons(ntohs(total_length) - ((iph->version_ihl & 0xf) << 2) + sizeof(struct ipv4_hdr)); + iph->version_ihl = (version_ihl & 0xf0) | (sizeof(struct ipv4_hdr) >> 2); csum = rte_ipv4_udptcp_cksum(iph, l4_hdr); iph->total_length = total_length; + iph->version_ihl = version_ihl; return csum; } From 2215ec09b95d398667351d1aad2084f13a82e43e Mon Sep 17 00:00:00 2001 From: ytwang0320 Date: Tue, 19 Jan 2021 09:35:21 +0800 Subject: [PATCH 07/35] update whtlist and blklist error log --- src/ipvs/ip_vs_blklst.c | 17 +++++++++-------- src/ipvs/ip_vs_whtlst.c | 22 +++++++++++++--------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/src/ipvs/ip_vs_blklst.c b/src/ipvs/ip_vs_blklst.c index 004982577..2e514ec9d 100644 --- a/src/ipvs/ip_vs_blklst.c +++ b/src/ipvs/ip_vs_blklst.c @@ -145,8 +145,8 @@ static int dp_vs_blklst_add(int af, uint8_t proto, const union inet_addr *vaddr, /*set blklst ip on master lcore*/ err = dp_vs_blklst_add_lcore(af, proto, vaddr, vport, blklst); - if (err) { - RTE_LOG(INFO, SERVICE, "[%s] fail to set blklst ip -- %s\n", __func__, dpvs_strerror(err)); + if (err && err != EDPVS_EXIST) { + RTE_LOG(ERR, SERVICE, "[%s] fail to set blklst ip -- %s\n", __func__, dpvs_strerror(err)); return err; } @@ -189,7 +189,6 @@ static int dp_vs_blklst_del(int af, uint8_t proto, const union inet_addr *vaddr, /*del blklst ip on master lcores*/ err = dp_vs_blklst_del_lcore(af, proto, vaddr, vport, blklst); if (err) { - RTE_LOG(INFO, SERVICE, "%s: fail to del blklst ip -- %s\n", __func__, dpvs_strerror(err)); 
return err; } @@ -323,13 +322,15 @@ static int blklst_msg_process(bool add, struct dpvs_msg *msg) } cf = (struct dp_vs_blklst_conf *)msg->data; - if (add) + if (add) { err = dp_vs_blklst_add_lcore(cf->af, cf->proto, &cf->vaddr, cf->vport, &cf->blklst); - else + if (err && err != EDPVS_EXIST) { + RTE_LOG(ERR, SERVICE, "%s: fail to add blklst: %s.\n", __func__, dpvs_strerror(err)); + } + } + else { err = dp_vs_blklst_del_lcore(cf->af, cf->proto, &cf->vaddr, cf->vport, &cf->blklst); - if (err != EDPVS_OK) - RTE_LOG(ERR, SERVICE, "%s: fail to %s blklst: %s.\n", - __func__, add ? "add" : "del", dpvs_strerror(err)); + } return err; } diff --git a/src/ipvs/ip_vs_whtlst.c b/src/ipvs/ip_vs_whtlst.c index 1faa6eb71..e0be714bf 100644 --- a/src/ipvs/ip_vs_whtlst.c +++ b/src/ipvs/ip_vs_whtlst.c @@ -165,8 +165,8 @@ static int dp_vs_whtlst_add(int af, uint8_t proto, const union inet_addr *vaddr, /*set whtlst ip on master lcore*/ err = dp_vs_whtlst_add_lcore(af, proto, vaddr, vport, whtlst); - if (err) { - RTE_LOG(INFO, SERVICE, "[%s] fail to set whtlst ip\n", __func__); + if (err && err != EDPVS_EXIST) { + RTE_LOG(ERR, SERVICE, "[%s] fail to set whtlst ip\n", __func__); return err; } @@ -209,7 +209,6 @@ static int dp_vs_whtlst_del(int af, uint8_t proto, const union inet_addr *vaddr, /*del whtlst ip on master lcores*/ err = dp_vs_whtlst_del_lcore(af, proto, vaddr, vport, whtlst); if (err) { - RTE_LOG(INFO, SERVICE, "[%s] fail to del whtlst ip\n", __func__); return err; } @@ -235,7 +234,10 @@ void dp_vs_whtlst_flush(struct dp_vs_service *svc) for (hash = 0; hash < DPVS_WHTLST_TAB_SIZE; hash++) { list_for_each_entry_safe(entry, next, &this_whtlst_tab[hash], list) { - if (entry->af == svc->af && inet_addr_equal(svc->af, &entry->vaddr, &svc->addr)) + if (entry->af == svc->af + && entry->vport == svc->port + && entry->proto == svc->proto + && inet_addr_equal(svc->af, &entry->vaddr, &svc->addr)) dp_vs_whtlst_del(svc->af, entry->proto, &entry->vaddr, entry->vport, &entry->whtlst); } @@ 
-339,13 +341,15 @@ static int whtlst_msg_process(bool add, struct dpvs_msg *msg) } cf = (struct dp_vs_whtlst_conf *)msg->data; - if (add) + if (add) { err = dp_vs_whtlst_add_lcore(cf->af, cf->proto, &cf->vaddr, cf->vport, &cf->whtlst); - else + if (err && err != EDPVS_EXIST) { + RTE_LOG(ERR, SERVICE, "%s: fail to add whtlst: %s.\n", __func__, dpvs_strerror(err)); + } + } + else { err = dp_vs_whtlst_del_lcore(cf->af, cf->proto, &cf->vaddr, cf->vport, &cf->whtlst); - if (err != EDPVS_OK) - RTE_LOG(ERR, SERVICE, "%s: fail to %s whtlst: %s.\n", - __func__, add ? "add" : "del", dpvs_strerror(err)); + } return err; } From 379584aa86cd996a6c4a8467a60680f9adf65dc2 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Mon, 25 Jan 2021 11:44:52 +0800 Subject: [PATCH 08/35] patch: dpdk eal memory debug patch for dpdk-stable-18.11.2 --- .../0006-enable-dpdk-eal-memory-debug.patch | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 patch/dpdk-stable-18.11.2/0006-enable-dpdk-eal-memory-debug.patch diff --git a/patch/dpdk-stable-18.11.2/0006-enable-dpdk-eal-memory-debug.patch b/patch/dpdk-stable-18.11.2/0006-enable-dpdk-eal-memory-debug.patch new file mode 100644 index 000000000..c286c15f3 --- /dev/null +++ b/patch/dpdk-stable-18.11.2/0006-enable-dpdk-eal-memory-debug.patch @@ -0,0 +1,68 @@ +From 19652889ed74b09aba6f22dfa96b19c009a7309a Mon Sep 17 00:00:00 2001 +From: ywc +Date: Mon, 25 Jan 2021 10:27:52 +0800 +Subject: [PATCH] enable dpdk eal memory debug + +--- + config/common_base | 2 +- + lib/librte_eal/common/include/rte_malloc.h | 15 +++++++++++++++ + lib/librte_eal/common/rte_malloc.c | 4 ++++ + 3 files changed, 20 insertions(+), 1 deletion(-) + +diff --git a/config/common_base b/config/common_base +index d12ae98..765ae2e 100644 +--- a/config/common_base ++++ b/config/common_base +@@ -94,7 +94,7 @@ CONFIG_RTE_EAL_IGB_UIO=n + CONFIG_RTE_EAL_VFIO=n + CONFIG_RTE_MAX_VFIO_GROUPS=64 + CONFIG_RTE_MAX_VFIO_CONTAINERS=64 +-CONFIG_RTE_MALLOC_DEBUG=n 
++CONFIG_RTE_MALLOC_DEBUG=y + CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n + CONFIG_RTE_USE_LIBBSD=n + +diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h +index e0be13c..f3bcdc6 100644 +--- a/lib/librte_eal/common/include/rte_malloc.h ++++ b/lib/librte_eal/common/include/rte_malloc.h +@@ -214,6 +214,21 @@ struct rte_malloc_socket_stats { + rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket); + + /** ++ * Check the header/tailer cookies of memory pointed to by the provided pointer. ++ * ++ * This pointer must have been returned by a previous call to ++ * rte_malloc(), rte_zmalloc(), rte_calloc() or rte_realloc(). ++ * ++ * @param ptr ++ * The pointer to memory to be checked. ++ * @return ++ * - true if the header/tailer cookies are OK. ++ * - Otherwise, false. ++ */ ++int ++rte_memmory_ok(void *ptr); ++ ++/** + * Frees the memory space pointed to by the provided pointer. + * + * This pointer must have been returned by a previous call to +diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c +index 47c2bec..1fab27c 100644 +--- a/lib/librte_eal/common/rte_malloc.c ++++ b/lib/librte_eal/common/rte_malloc.c +@@ -26,6 +26,10 @@ + #include "malloc_heap.h" + #include "eal_memalloc.h" + ++int rte_memmory_ok(void *addr) ++{ ++ return malloc_elem_cookies_ok(RTE_PTR_SUB(addr, MALLOC_ELEM_HEADER_LEN)); ++} + + /* Free the memory space back to heap */ + void rte_free(void *addr) +-- +1.8.3.1 + From 82c07b4ce673cbb12a921c257b7eb1ea6ca69891 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Mon, 25 Jan 2021 11:31:47 +0800 Subject: [PATCH 09/35] CI: add CI workflow file --- .github/workflows/build.yaml | 59 ++++++++++++++++++++++++++++++++++++ .github/workflows/run.yaml | 28 +++++++++++++++++ README.md | 2 ++ 3 files changed, 89 insertions(+) create mode 100644 .github/workflows/build.yaml create mode 100644 .github/workflows/run.yaml diff --git a/.github/workflows/build.yaml 
b/.github/workflows/build.yaml new file mode 100644 index 000000000..cb43a6d8f --- /dev/null +++ b/.github/workflows/build.yaml @@ -0,0 +1,59 @@ +name: Build + +on: + push: + branches: [master, devel] + release: + branches: [master] + types: [published] + schedule: + - cron: '30 2 * * 1' + pull_request: + branches: [master, devel] + types: [labeled] + +jobs: + build-basic: + runs-on: self-hosted + env: + RTE_SDK: /data/dpdk/intel/dpdk-stable-18.11.2 + RTE_TARGET: x86_64-native-linuxapp-gcc + steps: + - uses: actions/checkout@v2 + - name: make + run: make -j32 + + build-mlnx: + runs-on: self-hosted + env: + RTE_SDK: /data/dpdk/mlnx/dpdk-stable-18.11.2 + RTE_TARGET: x86_64-native-linuxapp-gcc + steps: + - uses: actions/checkout@v2 + - name: config + run: sed -i 's/^CONFIG_MLX5=./CONFIG_MLX5=y/' src/config.mk + - name: make + run: make -j32 + + build-debug: + runs-on: self-hosted + env: + RTE_SDK: /data/dpdk/intel/dpdk-stable-18.11.2 + RTE_TARGET: x86_64-native-linuxapp-gcc + steps: + - uses: actions/checkout@v2 + - name: config + run: sed -i 's/#CFLAGS +=/CFLAGS +=/' src/config.mk && sed -i 's/^#DEBUG := 1/DEBUG := 1/' src/Makefile + - name: make + run: make -j32 + + build-olddpdk: + runs-on: self-hosted + env: + RTE_SDK: /data/dpdk/intel/dpdk-stable-17.11.6 + RTE_TARGET: x86_64-native-linuxapp-gcc + steps: + - uses: actions/checkout@v2 + - name: make + run: make -j32 + diff --git a/.github/workflows/run.yaml b/.github/workflows/run.yaml new file mode 100644 index 000000000..cf3350f1b --- /dev/null +++ b/.github/workflows/run.yaml @@ -0,0 +1,28 @@ +name: Run + +on: + push: + branches: [master, devel] + release: + branches: [master] + types: [published] + schedule: + - cron: '30 3 * * 1' + pull_request: + branches: [master, devel] + types: [labeled] + +jobs: + run-dpvs: + runs-on: self-hosted + env: + RTE_SDK: /data/dpdk/intel/dpdk-stable-18.11.2 + RTE_TARGET: x86_64-native-linuxapp-gcc + steps: + - uses: actions/checkout@v2 + - name: make + run: make -j32 + - name: 
install + run: make install + - name: run-dpvs + run: sudo dpvsci $(pwd)/bin/dpvs diff --git a/README.md b/README.md index 899671b48..8db4504eb 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +![Build](https://github.com/iqiyi/dpvs/workflows/Build/badge.svg) ![Run](https://github.com/iqiyi/dpvs/workflows/Run/badge.svg) + ![dpvs-logo.png](./pic/DPVS-logo.png) # Introduction From 0cbef0ff12e7ee7b04bcb90a7d8a99f4d4f16dd3 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Fri, 5 Feb 2021 11:08:59 +0800 Subject: [PATCH 10/35] bugfix: fix issue #711 --- include/conf/conn.h | 29 + include/conf/dest.h | 21 +- include/conf/service.h | 15 + include/ipvs/conn.h | 25 +- include/ipvs/service.h | 24 +- src/ipvs/ip_vs_synproxy.c | 2 +- tools/keepalived/keepalived/include/ip_vs.h | 760 ++++++------------ tools/keepalived/keepalived/include/libipvs.h | 1 + 8 files changed, 338 insertions(+), 539 deletions(-) diff --git a/include/conf/conn.h b/include/conf/conn.h index e9eaf7a51..aa18ce904 100644 --- a/include/conf/conn.h +++ b/include/conf/conn.h @@ -23,6 +23,35 @@ #include "inet.h" #include "conf/sockopts.h" +/* + * IPVS Conn flags derived from "linux/ip_vs.h". + * + * Note: We just keep the macros used by dpvs/keepalived, and the value for some macros + * are changed. Besides, some new macros are added for dpvs. 
+ */ +/* Conn flags used by DPVS and Keepalived */ +#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ +#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ +#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ +#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ +#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ +#define IP_VS_CONN_F_FULLNAT 0x0005 /* full nat mode */ +#define IP_VS_CONN_F_SNAT 0x0006 /* snat mode */ +#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ + +#define IP_VS_CONN_F_SYNPROXY 0x0010 /* synproxy switch flag*/ +#define IP_VS_CONN_F_EXPIRE_QUIESCENT 0x0020 /* expire quiescent conns */ + +/* Conn flags used by DPVS only */ +#define IP_VS_CONN_F_HASHED 0x0100 /* hashed entry */ +#define IP_VS_CONN_F_INACTIVE 0x0200 /* not established */ +#define IP_VS_CONN_F_TEMPLATE 0x0400 /* template, not connection */ +#define IP_VS_CONN_F_ONE_PACKET 0x0800 /* forward only one packet */ + +#define IP_VS_CONN_F_IN_TIMER 0x1000 /* timer attached */ +#define IP_VS_CONN_F_REDIRECT_HASHED 0x2000 /* hashed in redirect table */ +#define IP_VS_CONN_F_NOFASTXMIT 0x4000 /* do not fastxmit */ + /* How many connections returned at most for one sockopt ctrl msg. * Decrease it for saving memory, increase it for better performace. */ diff --git a/include/conf/dest.h b/include/conf/dest.h index bfd6b1724..af312a178 100644 --- a/include/conf/dest.h +++ b/include/conf/dest.h @@ -19,17 +19,20 @@ #define __DPVS_DEST_CONF_H__ #include "conf/service.h" +#include "conf/conn.h" -/* must consistent with IP_VS_CONN_F_XXX (libipvs-2.6/ip_vs.h) */ +/* + * DPVS_FWD_XXX should always be the same with IP_VS_CONN_F_XXX. 
+ */ enum dpvs_fwd_mode { - DPVS_FWD_MASQ = 0, - DPVS_FWD_LOCALNODE = 1, - DPVS_FWD_MODE_TUNNEL = 2, - DPVS_FWD_MODE_DR = 3, - DPVS_FWD_MODE_BYPASS = 4, - DPVS_FWD_MODE_FNAT = 5, - DPVS_FWD_MODE_NAT = DPVS_FWD_MASQ, - DPVS_FWD_MODE_SNAT = 6, + DPVS_FWD_MASQ = IP_VS_CONN_F_MASQ, + DPVS_FWD_LOCALNODE = IP_VS_CONN_F_LOCALNODE, + DPVS_FWD_MODE_TUNNEL = IP_VS_CONN_F_TUNNEL, + DPVS_FWD_MODE_DR = IP_VS_CONN_F_DROUTE, + DPVS_FWD_MODE_BYPASS = IP_VS_CONN_F_BYPASS, + DPVS_FWD_MODE_FNAT = IP_VS_CONN_F_FULLNAT, + DPVS_FWD_MODE_SNAT = IP_VS_CONN_F_SNAT, + DPVS_FWD_MODE_NAT = DPVS_FWD_MASQ, }; enum { diff --git a/include/conf/service.h b/include/conf/service.h index 6c70bfd7b..ce38acb95 100644 --- a/include/conf/service.h +++ b/include/conf/service.h @@ -28,6 +28,21 @@ #define DP_VS_SCHEDNAME_MAXLEN 16 +/* + * Virtual Service Flags derived from "linux/ip_vs.h" + */ +#define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ +#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ +#define IP_VS_SVC_F_ONEPACKET 0x0004 /* one-packet scheduling */ +#define IP_VS_SVC_F_SCHED1 0x0008 /* scheduler flag 1 */ +#define IP_VS_SVC_F_SCHED2 0x0010 /* scheduler flag 2 */ +#define IP_VS_SVC_F_SCHED3 0x0020 /* scheduler flag 3 */ +#define IP_VS_SVC_F_SIP_HASH 0x0100 /* sip hash target */ +#define IP_VS_SVC_F_QID_HASH 0x0200 /* quic cid hash target */ +#define IP_VS_SVC_F_MATCH 0x0400 /* snat match */ +#define IP_VS_SVC_F_SCHED_SH_FALLBACK IP_VS_SVC_F_SCHED1 /* SH fallback */ +#define IP_VS_SVC_F_SCHED_SH_PORT IP_VS_SVC_F_SCHED2 /* SH use port */ + struct dp_vs_service_conf { /* virtual service addresses */ uint16_t af; diff --git a/include/ipvs/conn.h b/include/ipvs/conn.h index d6518ed73..c49f535f2 100644 --- a/include/ipvs/conn.h +++ b/include/ipvs/conn.h @@ -35,17 +35,20 @@ enum { DPVS_CONN_DIR_MAX, }; -enum { - DPVS_CONN_F_HASHED = 0x0040, - DPVS_CONN_F_REDIRECT_HASHED = 0x0080, - DPVS_CONN_F_INACTIVE = 0x0100, - DPVS_CONN_F_IN_TIMER = 0x0200, - DPVS_CONN_F_EXPIRE_QUIESCENT = 0x4000, - 
DPVS_CONN_F_SYNPROXY = 0x8000, - DPVS_CONN_F_TEMPLATE = 0x1000, - DPVS_CONN_F_NOFASTXMIT = 0x2000, - DPVS_CONN_F_ONE_PACKET = 0x0400, -}; +/* + * DPVS_CONN_F_XXX should always be the same with IP_VS_CONN_F_XXX. + */ +/* Conn flags used by both DPVS and Keepalived*/ +#define DPVS_CONN_F_SYNPROXY IP_VS_CONN_F_SYNPROXY +#define DPVS_CONN_F_EXPIRE_QUIESCENT IP_VS_CONN_F_EXPIRE_QUIESCENT +/* Conn flags used by DPVS only */ +#define DPVS_CONN_F_HASHED IP_VS_CONN_F_HASHED +#define DPVS_CONN_F_INACTIVE IP_VS_CONN_F_INACTIVE +#define DPVS_CONN_F_TEMPLATE IP_VS_CONN_F_TEMPLATE +#define DPVS_CONN_F_ONE_PACKET IP_VS_CONN_F_ONE_PACKET +#define DPVS_CONN_F_IN_TIMER IP_VS_CONN_F_IN_TIMER +#define DPVS_CONN_F_REDIRECT_HASHED IP_VS_CONN_F_REDIRECT_HASHED +#define DPVS_CONN_F_NOFASTXMIT IP_VS_CONN_F_NOFASTXMIT struct dp_vs_conn_param { int af; diff --git a/include/ipvs/service.h b/include/ipvs/service.h index 4bfcdca82..8c1adeb14 100644 --- a/include/ipvs/service.h +++ b/include/ipvs/service.h @@ -31,18 +31,18 @@ #include "conf/match.h" #include "conf/service.h" -#define RTE_LOGTYPE_SERVICE RTE_LOGTYPE_USER3 -#define DP_VS_SVC_F_PERSISTENT 0x0001 /* peristent port */ -#define DP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ -#define DP_VS_SVC_F_ONEPACKET 0x0004 /* one-packet scheduling */ -#define DP_VS_SVC_F_SCHED1 0x0008 /* scheduler flag 1 */ -#define DP_VS_SVC_F_SCHED2 0x0010 /* scheduler flag 2 */ -#define DP_VS_SVC_F_SCHED3 0x0020 /* scheduler flag 3 */ - -#define DP_VS_SVC_F_SIP_HASH 0x0100 /* sip hash target */ -#define DP_VS_SVC_F_QID_HASH 0x0200 /* quic cid hash target */ -#define DP_VS_SVC_F_MATCH 0x0400 /* snat match */ -#define DP_VS_SVC_F_SYNPROXY 0x8000 /* synrpoxy flag */ +#define RTE_LOGTYPE_SERVICE RTE_LOGTYPE_USER3 + +/* DP_VS_SVC_F_XXX should always be the same with IP_VS_SVC_F_XXX */ +#define DP_VS_SVC_F_PERSISTENT IP_VS_SVC_F_PERSISTENT +#define DP_VS_SVC_F_HASHED IP_VS_SVC_F_HASHED +#define DP_VS_SVC_F_ONEPACKET IP_VS_SVC_F_ONEPACKET +#define DP_VS_SVC_F_SCHED1 
IP_VS_SVC_F_SCHED1 +#define DP_VS_SVC_F_SCHED2 IP_VS_SVC_F_SCHED2 +#define DP_VS_SVC_F_SCHED3 IP_VS_SVC_F_SCHED3 +#define DP_VS_SVC_F_SIP_HASH IP_VS_SVC_F_SIP_HASH +#define DP_VS_SVC_F_QID_HASH IP_VS_SVC_F_QID_HASH +#define DP_VS_SVC_F_MATCH IP_VS_SVC_F_MATCH /* virtual service */ struct dp_vs_service { diff --git a/src/ipvs/ip_vs_synproxy.c b/src/ipvs/ip_vs_synproxy.c index dabd19ad1..406285e47 100644 --- a/src/ipvs/ip_vs_synproxy.c +++ b/src/ipvs/ip_vs_synproxy.c @@ -692,7 +692,7 @@ int dp_vs_synproxy_syn_rcv(int af, struct rte_mbuf *mbuf, if (th->syn && !th->ack && !th->rst && !th->fin && (svc = dp_vs_service_lookup(af, iph->proto, &iph->daddr, th->dest, 0, NULL, NULL, NULL, rte_lcore_id())) && - (svc->flags & DP_VS_SVC_F_SYNPROXY)) { + (svc->flags & DPVS_CONN_F_SYNPROXY)) { /* if service's weight is zero (non-active realserver), * do noting and drop the packet */ if (svc->weight == 0) { diff --git a/tools/keepalived/keepalived/include/ip_vs.h b/tools/keepalived/keepalived/include/ip_vs.h index d43d55d87..e8a7e0bc2 100644 --- a/tools/keepalived/keepalived/include/ip_vs.h +++ b/tools/keepalived/keepalived/include/ip_vs.h @@ -1,8 +1,3 @@ -/* - * IP Virtual Server - * data structure and functionality definitions - */ - #ifndef KEEPALIVED_IP_VS_H #define KEEPALIVED_IP_VS_H @@ -11,12 +6,12 @@ /* System includes */ #include #include -#include /* Force inclusion of net/if.h before linux/if.h */ +#include /* Force inclusion of net/if.h before linux/if.h */ #include #include /* Prior to Linux 4.2 have to include linux/in.h and linux/in6.h * or linux/netlink.h to include linux/netfilter.h */ -#include /* For nf_inet_addr */ +#include /* For nf_inet_addr */ #include #include "dp_vs.h" @@ -27,610 +22,363 @@ // ///////////////////////////////////////////////////////////////////////////////////////// -#define IP_VS_VERSION_CODE 0x010201 -#define NVERSION(version) \ - (version >> 16) & 0xFF, \ - (version >> 8) & 0xFF, \ - version & 0xFF - -/* Virtual Service Flags */ -#define 
IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ -#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ -#define IP_VS_SVC_F_ONEPACKET 0x0004 /* one-packet scheduling */ -#define IP_VS_SVC_F_SCHED1 0x0008 /* scheduler flag 1 */ -#define IP_VS_SVC_F_SCHED2 0x0010 /* scheduler flag 2 */ -#define IP_VS_SVC_F_SCHED3 0x0020 /* scheduler flag 3 */ - -#define IP_VS_SVC_F_SIP_HASH 0x0100 /* sip hash target */ -#define IP_VS_SVC_F_QID_HASH 0x0200 /* quic cid hash target */ -#define IP_VS_SVC_F_MATCH 0x0400 /* snat match */ +#define IP_VS_VERSION_CODE 0x010808 /* DPVS v1.8.8 */ +#define NVERSION(version) \ + (version >> 16) & 0xFF, \ + (version >> 8) & 0xFF, \ + version & 0xFF -#define IP_VS_SVC_F_SCHED_SH_FALLBACK IP_VS_SVC_F_SCHED1 /* SH fallback */ -#define IP_VS_SVC_F_SCHED_SH_PORT IP_VS_SVC_F_SCHED2 /* SH use port */ - -#define IP_VS_CONN_F_EXPIRE_QUIESCENT 0x4000 /* expire quiescent conns */ -#define IP_VS_CONN_F_SYNPROXY 0x8000 /* synproxy switch flag*/ - -/* Destination Server Flags */ -#define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */ -#define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */ +#define IP_VS_SCHEDNAME_MAXLEN DP_VS_SCHEDNAME_MAXLEN +#define IP_VS_PENAME_MAXLEN 16 +#define IP_VS_IFNAME_MAXLEN 16 +#define IP_VS_PEDATA_MAXLEN 255 /* IPVS sync daemon states */ -#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ -#define IP_VS_STATE_MASTER 0x0001 /* started as master */ -#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ - -/* IPVS socket options */ -#define IP_VS_BASE_CTL (64+1024+64) /* base */ - -#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ -#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) -#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) -#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) -#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) -#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) -#define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) -#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) -#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) 
-#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) -#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) -#define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) -#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) -#define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) -#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) -#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) -#define IP_VS_SO_SET_ADDLADDR (IP_VS_BASE_CTL+16) -#define IP_VS_SO_SET_DELLADDR (IP_VS_BASE_CTL+17) -#define IP_VS_SO_SET_ADDBLKLST (IP_VS_BASE_CTL+18) -#define IP_VS_SO_SET_DELBLKLST (IP_VS_BASE_CTL+19) -#define IP_VS_SO_SET_ADDTUNNEL (IP_VS_BASE_CTL+20) -#define IP_VS_SO_SET_DELTUNNEL (IP_VS_BASE_CTL+21) -#define IP_VS_SO_SET_ADDWHTLST (IP_VS_BASE_CTL+22) -#define IP_VS_SO_SET_DELWHTLST (IP_VS_BASE_CTL+23) -#define IP_VS_SO_SET_MAX IP_VS_SO_SET_DELWHTLST - -#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL -#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) -#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) -#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) -#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) -#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ -#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) -#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) -#define IP_VS_SO_GET_LADDRS (IP_VS_BASE_CTL+8) -#define IP_VS_SO_GET_MAX IP_VS_SO_GET_LADDRS +#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ +#define IP_VS_STATE_MASTER 0x0001 /* started as master */ +#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ + +/* VRRP IPRoute Flags */ +#define IPROUTE_DEL 0 +#define IPROUTE_ADD 1 + +#define IPADDRESS_DEL 0 +#define IPADDRESS_ADD 1 + +/* IPVS command options */ +#define IP_VS_BASE_CTL (64+1024+64) /* base */ + +#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ +#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) +#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) +#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) +#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) +#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) +#define 
IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) +#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) +#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) +#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) +#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) +#define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) +#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) +#define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) +#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) +#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) +#define IP_VS_SO_SET_ADDLADDR (IP_VS_BASE_CTL+16) +#define IP_VS_SO_SET_DELLADDR (IP_VS_BASE_CTL+17) +#define IP_VS_SO_SET_ADDBLKLST (IP_VS_BASE_CTL+18) +#define IP_VS_SO_SET_DELBLKLST (IP_VS_BASE_CTL+19) +#define IP_VS_SO_SET_ADDTUNNEL (IP_VS_BASE_CTL+20) +#define IP_VS_SO_SET_DELTUNNEL (IP_VS_BASE_CTL+21) +#define IP_VS_SO_SET_ADDWHTLST (IP_VS_BASE_CTL+22) +#define IP_VS_SO_SET_DELWHTLST (IP_VS_BASE_CTL+23) +#define IP_VS_SO_SET_MAX IP_VS_SO_SET_DELWHTLST + +#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL +#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) +#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) +#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) +#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) +#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ +#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) +#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) +#define IP_VS_SO_GET_LADDRS (IP_VS_BASE_CTL+8) +#define IP_VS_SO_GET_MAX IP_VS_SO_GET_LADDRS /* - * IPVS Connection Flags - * Only flags 0..15 are sent to backup server + * The struct ip_vs_service_user and struct ip_vs_dest_user are + * used to set IPVS rules through setsockopt. 
*/ -#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ -#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ -#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ -#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ -#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ -#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ -#define IP_VS_CONN_F_FULLNAT 0x0005 /* full nat mode */ -#define IP_VS_CONN_F_SNAT 0x0006 /* SNAT mode */ -#define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */ -#define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */ -#define IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */ -#define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */ -#define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */ -#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */ -#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ -#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ -#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ -#define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ - -/* Initial bits allowed in backup server */ -#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \ - IP_VS_CONN_F_NOOUTPUT | \ - IP_VS_CONN_F_INACTIVE | \ - IP_VS_CONN_F_SEQ_MASK | \ - IP_VS_CONN_F_NO_CPORT | \ - IP_VS_CONN_F_TEMPLATE \ - ) - -/* Bits allowed to update in backup server */ -#define IP_VS_CONN_F_BACKUP_UPD_MASK (IP_VS_CONN_F_INACTIVE | \ - IP_VS_CONN_F_SEQ_MASK) - -/* Flags that are not sent to backup server start from bit 16 */ -#define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */ - -/* Connection flags from destination that can be changed by user space */ -#define IP_VS_CONN_F_DEST_MASK (IP_VS_CONN_F_FWD_MASK | \ - IP_VS_CONN_F_ONE_PACKET | \ - IP_VS_CONN_F_NFCT | \ - 0) - -#define IP_VS_SCHEDNAME_MAXLEN 16 -#define IP_VS_PENAME_MAXLEN 16 -#define IP_VS_IFNAME_MAXLEN 16 -#define IP_VS_PEDATA_MAXLEN 255 - struct ip_vs_service_user { - /* virtual 
service addresses */ - u_int16_t protocol; - __be32 __addr_v4; /* virtual ip address */ - __be16 port; - u_int32_t fwmark; /* firwall mark of service */ - - /* virtual service options */ - char sched_name[IP_VS_SCHEDNAME_MAXLEN]; - unsigned flags; /* virtual service flags */ - unsigned timeout; /* persistent timeout in sec */ - unsigned conn_timeout; - __be32 netmask; /* persistent netmask */ - unsigned bps; - unsigned limit_proportion; - - char srange[256]; - char drange[256]; - char iifname[IFNAMSIZ]; - char oifname[IFNAMSIZ]; + /* virtual service addresses */ + u_int16_t protocol; + __be32 __addr_v4; /* virtual ip address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* virtual service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout in sec */ + unsigned conn_timeout; + __be32 netmask; /* persistent netmask */ + unsigned bps; + unsigned limit_proportion; + + char srange[256]; + char drange[256]; + char iifname[IFNAMSIZ]; + char oifname[IFNAMSIZ]; }; struct ip_vs_dest_user { - /* destination server address */ - __be32 addr; - __be16 port; + /* destination server address */ + __be32 addr; + __be16 port; - /* real server options */ - unsigned int conn_flags; /* connection flags */ - int weight; /* destination weight */ + /* real server options */ + unsigned int conn_flags; /* connection flags */ + int weight; /* destination weight */ - /* thresholds for active connections */ - __u32 u_threshold; /* upper threshold */ - __u32 l_threshold; /* lower threshold */ + /* thresholds for active connections */ + __u32 u_threshold; /* upper threshold */ + __u32 l_threshold; /* lower threshold */ }; struct ip_vs_laddr_user { - __be32 __addr_v4; - u_int16_t af; - union nf_inet_addr addr; - char ifname[IFNAMSIZ]; + __be32 __addr_v4; + u_int16_t af; + union nf_inet_addr addr; + char ifname[IFNAMSIZ]; }; struct ip_vs_blklst_user { - __be32 __addr_v4; - u_int16_t 
af; - union nf_inet_addr addr; + __be32 __addr_v4; + u_int16_t af; + union nf_inet_addr addr; }; struct ip_vs_whtlst_user { - __be32 __addr_v4; - u_int16_t af; - union nf_inet_addr addr; + __be32 __addr_v4; + u_int16_t af; + union nf_inet_addr addr; }; struct ip_vs_tunnel_user { - char ifname[IFNAMSIZ]; - char kind[TNLKINDSIZ]; - char link[IFNAMSIZ]; - union nf_inet_addr laddr; - union nf_inet_addr raddr; + char ifname[IFNAMSIZ]; + char kind[TNLKINDSIZ]; + char link[IFNAMSIZ]; + union nf_inet_addr laddr; + union nf_inet_addr raddr; }; -/* IPVS statistics object (for user space) */ +/* + * IPVS statistics object (for user space) + */ struct ip_vs_stats_user { - __u64 conns; /* connections scheduled */ - __u64 inpkts; /* incoming packets */ - __u64 inbytes; /* incoming bytes */ - __u64 outpkts; /* outgoing packets */ - __u64 outbytes; /* outgoing bytes */ - - __u32 cps; /* current connection rate */ - __u32 inpps; /* current in packet rate */ - __u32 inbps; /* current in byte rate */ - __u32 outpps; /* current out packet rate */ - __u32 outbps; /* current out byte rate */ + __u64 conns; /* connections scheduled */ + __u64 inpkts; /* incoming packets */ + __u64 inbytes; /* incoming bytes */ + __u64 outpkts; /* outgoing packets */ + __u64 outbytes; /* outgoing bytes */ + + __u32 cps; /* current connection rate */ + __u32 inpps; /* current in packet rate */ + __u32 inbps; /* current in byte rate */ + __u32 outpps; /* current out packet rate */ + __u32 outbps; /* current out byte rate */ }; /* The argument to IP_VS_SO_GET_INFO */ struct ip_vs_getinfo { - /* version number */ - unsigned int version; + /* version number */ + unsigned int version; - /* size of connection hash table */ - unsigned int size; + /* size of connection hash table */ + unsigned int size; - /* number of virtual services */ - unsigned int num_services; + /* number of virtual services */ + unsigned int num_services; }; /* The argument to IP_VS_SO_GET_SERVICE */ struct ip_vs_service_entry { - /* which 
service: user fills in these */ - __u16 protocol; - __be32 __addr_v4; /* virtual address */ - __be16 port; - __u32 fwmark; /* firwall mark of service */ - - /* service options */ - char sched_name[IP_VS_SCHEDNAME_MAXLEN]; - unsigned int flags; /* virtual service flags */ - unsigned int timeout; /* persistent timeout */ - unsigned int conn_timeout; - __be32 netmask; /* persistent netmask */ - - /* number of real servers */ - unsigned int num_dests; - unsigned int num_laddrs; - unsigned int bps; - unsigned int limit_proportion; - - /* statistics */ - struct ip_vs_stats_user stats; - - char srange[256]; - char drange[256]; - char iifname[IFNAMSIZ]; - char oifname[IFNAMSIZ]; + /* which service: user fills in these */ + __u16 protocol; + __be32 __addr_v4; /* virtual address */ + __be16 port; + __u32 fwmark; /* firwall mark of service */ + + /* service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned int flags; /* virtual service flags */ + unsigned int timeout; /* persistent timeout */ + unsigned int conn_timeout; + __be32 netmask; /* persistent netmask */ + + /* number of real servers */ + unsigned int num_dests; + unsigned int num_laddrs; + unsigned int bps; + unsigned int limit_proportion; + + /* statistics */ + struct ip_vs_stats_user stats; + + char srange[256]; + char drange[256]; + char iifname[IFNAMSIZ]; + char oifname[IFNAMSIZ]; }; struct ip_vs_dest_entry { - __be32 __addr_v4; /* destination address */ - __be16 port; - unsigned int conn_flags; /* connection flags */ - int weight; /* destination weight */ + __be32 __addr_v4; /* destination address */ + __be16 port; + unsigned int conn_flags; /* connection flags */ + int weight; /* destination weight */ - __u32 u_threshold; /* upper threshold */ - __u32 l_threshold; /* lower threshold */ + __u32 u_threshold; /* upper threshold */ + __u32 l_threshold; /* lower threshold */ - __u32 activeconns; /* active connections */ - __u32 inactconns; /* inactive connections */ - __u32 persistconns; /* 
persistent connections */ + __u32 activeconns; /* active connections */ + __u32 inactconns; /* inactive connections */ + __u32 persistconns; /* persistent connections */ - /* statistics */ - struct ip_vs_stats_user stats; + /* statistics */ + struct ip_vs_stats_user stats; }; struct ip_vs_laddr_entry { - __be32 __addr_v4; /* local address - internal use only */ - u_int64_t port_conflict; /* conflict counts */ - u_int32_t conn_counts; /* current connects */ - u_int16_t af; - union nf_inet_addr addr; + __be32 __addr_v4; /* local address - internal use only */ + u_int64_t port_conflict; /* conflict counts */ + u_int32_t conn_counts; /* current connects */ + u_int16_t af; + union nf_inet_addr addr; }; +/* The argument to IP_VS_SO_GET_LADDRS */ struct ip_vs_get_laddrs { - /* which service: user fills in these */ - u_int16_t protocol; - __be32 __addr_v4; /* virtual address - internal use only */ - __be16 port; - u_int32_t fwmark; /* firwall mark of service */ - - /* number of local address*/ - unsigned int num_laddrs; - u_int16_t af; - union nf_inet_addr addr; - - /* the real servers */ - struct ip_vs_laddr_entry entrytable[0]; -}; - -/* The argument to IP_VS_SO_GET_SERVICES */ -struct ip_vs_get_services { - /* number of virtual services */ - unsigned int num_services; - unsigned int cid; - /* service table */ - struct ip_vs_service_entry entrytable[0]; + /* which service: user fills in these */ + u_int16_t protocol; + __be32 __addr_v4; /* virtual address - internal use only */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* number of local address*/ + unsigned int num_laddrs; + u_int16_t af; + union nf_inet_addr addr; + + /* the real servers */ + struct ip_vs_laddr_entry entrytable[0]; }; /* The argument to IP_VS_SO_GET_TIMEOUT */ struct ip_vs_timeout_user { - int tcp_timeout; - int tcp_fin_timeout; - int udp_timeout; + int tcp_timeout; + int tcp_fin_timeout; + int udp_timeout; }; -/* The argument to IP_VS_SO_GET_DAEMON */ -struct 
ip_vs_daemon_user { - /* sync daemon state (master/backup) */ - int state; - - /* multicast interface name */ - char mcast_ifn[IP_VS_IFNAME_MAXLEN]; - - /* SyncID we belong to */ - int syncid; -}; - -#define IPROUTE_DEL 0 -#define IPROUTE_ADD 1 - -#define IPADDRESS_DEL 0 -#define IPADDRESS_ADD 1 - -/* - * - * IPVS Generic Netlink interface definitions - * - */ - -/* Generic Netlink family info */ - -#define IPVS_GENL_NAME "IPVS" -#define IPVS_GENL_VERSION 0x1 - -struct ip_vs_flags { - __u32 flags; - __u32 mask; -}; - -/* Generic Netlink command attributes */ -enum { - IPVS_CMD_UNSPEC = 0, - - IPVS_CMD_NEW_SERVICE, /* add service */ - IPVS_CMD_SET_SERVICE, /* modify service */ - IPVS_CMD_DEL_SERVICE, /* delete service */ - IPVS_CMD_GET_SERVICE, /* get service info */ - - IPVS_CMD_NEW_DEST, /* add destination */ - IPVS_CMD_SET_DEST, /* modify destination */ - IPVS_CMD_DEL_DEST, /* delete destination */ - IPVS_CMD_GET_DEST, /* get destination info */ - - IPVS_CMD_NEW_DAEMON, /* start sync daemon */ - IPVS_CMD_DEL_DAEMON, /* stop sync daemon */ - IPVS_CMD_GET_DAEMON, /* get sync daemon status */ - - IPVS_CMD_SET_CONFIG, /* set config settings */ - IPVS_CMD_GET_CONFIG, /* get config settings */ - - IPVS_CMD_SET_INFO, /* only used in GET_INFO reply */ - IPVS_CMD_GET_INFO, /* get general IPVS info */ - - IPVS_CMD_ZERO, /* zero all counters and stats */ - IPVS_CMD_FLUSH, /* flush services and dests */ - - __IPVS_CMD_MAX, -}; - -#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1) - -/* Attributes used in the first level of commands */ -enum { - IPVS_CMD_ATTR_UNSPEC = 0, - IPVS_CMD_ATTR_SERVICE, /* nested service attribute */ - IPVS_CMD_ATTR_DEST, /* nested destination attribute */ - IPVS_CMD_ATTR_DAEMON, /* nested sync daemon attribute */ - IPVS_CMD_ATTR_TIMEOUT_TCP, /* TCP connection timeout */ - IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, /* TCP FIN wait timeout */ - IPVS_CMD_ATTR_TIMEOUT_UDP, /* UDP timeout */ - __IPVS_CMD_ATTR_MAX, -}; - -#define IPVS_CMD_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) - 
-/* - * Attributes used to describe a service - * - * Used inside nested attribute IPVS_CMD_ATTR_SERVICE - */ -enum { - IPVS_SVC_ATTR_UNSPEC = 0, - IPVS_SVC_ATTR_AF, /* address family */ - IPVS_SVC_ATTR_PROTOCOL, /* virtual service protocol */ - IPVS_SVC_ATTR_ADDR, /* virtual service address */ - IPVS_SVC_ATTR_PORT, /* virtual service port */ - IPVS_SVC_ATTR_FWMARK, /* firewall mark of service */ - - IPVS_SVC_ATTR_SCHED_NAME, /* name of scheduler */ - IPVS_SVC_ATTR_FLAGS, /* virtual service flags */ - IPVS_SVC_ATTR_TIMEOUT, /* persistent timeout */ - IPVS_SVC_ATTR_NETMASK, /* persistent netmask */ - - IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */ - - IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */ - - __IPVS_SVC_ATTR_MAX, -}; - -#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) - -/* - * Attributes used to describe a destination (real server) - * - * Used inside nested attribute IPVS_CMD_ATTR_DEST - */ -enum { - IPVS_DEST_ATTR_UNSPEC = 0, - IPVS_DEST_ATTR_ADDR, /* real server address */ - IPVS_DEST_ATTR_PORT, /* real server port */ - - IPVS_DEST_ATTR_FWD_METHOD, /* forwarding method */ - IPVS_DEST_ATTR_WEIGHT, /* destination weight */ - - IPVS_DEST_ATTR_U_THRESH, /* upper threshold */ - IPVS_DEST_ATTR_L_THRESH, /* lower threshold */ - - IPVS_DEST_ATTR_ACTIVE_CONNS, /* active connections */ - IPVS_DEST_ATTR_INACT_CONNS, /* inactive connections */ - IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */ - - IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */ - __IPVS_DEST_ATTR_MAX, -}; - -#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1) - -/* - * Attributes describing a sync daemon - * - * Used inside nested attribute IPVS_CMD_ATTR_DAEMON - */ -enum { - IPVS_DAEMON_ATTR_UNSPEC = 0, - IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */ - IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */ - IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */ - __IPVS_DAEMON_ATTR_MAX, -}; - -#define IPVS_DAEMON_ATTR_MAX 
(__IPVS_DAEMON_ATTR_MAX - 1) - -/* - * Attributes used to describe service or destination entry statistics - * - * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS - */ -enum { - IPVS_STATS_ATTR_UNSPEC = 0, - IPVS_STATS_ATTR_CONNS, /* connections scheduled */ - IPVS_STATS_ATTR_INPKTS, /* incoming packets */ - IPVS_STATS_ATTR_OUTPKTS, /* outgoing packets */ - IPVS_STATS_ATTR_INBYTES, /* incoming bytes */ - IPVS_STATS_ATTR_OUTBYTES, /* outgoing bytes */ - - IPVS_STATS_ATTR_CPS, /* current connection rate */ - IPVS_STATS_ATTR_INPPS, /* current in packet rate */ - IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */ - IPVS_STATS_ATTR_INBPS, /* current in byte rate */ - IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */ - __IPVS_STATS_ATTR_MAX, -}; - -#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1) - -/* Attributes used in response to IPVS_CMD_GET_INFO command */ -enum { - IPVS_INFO_ATTR_UNSPEC = 0, - IPVS_INFO_ATTR_VERSION, /* IPVS version number */ - IPVS_INFO_ATTR_CONN_TAB_SIZE, /* size of connection hash table */ - __IPVS_INFO_ATTR_MAX, -}; - -#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1) - - ///////////////////////////////////////////////////////////////////////////////////////// // // Part2. 
headers derived from "keepalived/include/ip_vs.h" // ///////////////////////////////////////////////////////////////////////////////////////// -typedef struct ip_vs_stats_user ip_vs_stats_t; - struct ip_vs_service_app { - struct ip_vs_service_user user; - uint16_t af; - union nf_inet_addr nf_addr; - char pe_name[IP_VS_PENAME_MAXLEN]; + struct ip_vs_service_user user; + uint16_t af; + union nf_inet_addr nf_addr; + char pe_name[IP_VS_PENAME_MAXLEN]; }; struct ip_vs_dest_app { - struct ip_vs_dest_user user; - uint16_t af; - union nf_inet_addr nf_addr; + struct ip_vs_dest_user user; + uint16_t af; + union nf_inet_addr nf_addr; #ifdef _HAVE_IPVS_TUN_TYPE_ - int tun_type; - int tun_port; + int tun_type; + int tun_port; #ifdef _HAVE_IPVS_TUN_CSUM_ - int tun_flags; + int tun_flags; #endif #endif }; struct ip_vs_service_entry_app { - struct ip_vs_service_entry user; - ip_vs_stats_t stats; - uint16_t af; - union nf_inet_addr nf_addr; - char pe_name[IP_VS_PENAME_MAXLEN]; + struct ip_vs_service_entry user; + struct ip_vs_stats_user stats; + uint16_t af; + union nf_inet_addr nf_addr; + char pe_name[IP_VS_PENAME_MAXLEN]; }; struct ip_vs_dest_entry_app { - struct ip_vs_dest_entry user; - ip_vs_stats_t stats; - uint16_t af; - union nf_inet_addr nf_addr; - + struct ip_vs_dest_entry user; + struct ip_vs_stats_user stats; + uint16_t af; + union nf_inet_addr nf_addr; }; struct ip_vs_get_dests_app { - struct { // Can we avoid this duplication of definition? - /* which service: user fills in these */ - __u16 protocol; - __be32 __addr_v4; /* virtual address */ - __be16 port; - __u32 fwmark; /* firwall mark of service */ - - /* number of real servers */ - unsigned int num_dests; - char srange[256]; - char drange[256]; - char iifname[IFNAMSIZ]; - char oifname[IFNAMSIZ]; - - /* the real servers */ - struct ip_vs_dest_entry_app entrytable[0]; - } user; - - uint16_t af; - union nf_inet_addr nf_addr; + struct { // Can we avoid this duplication of definition? 
+ /* which service: user fills in these */ + __u16 protocol; + __be32 __addr_v4; /* virtual address */ + __be16 port; + __u32 fwmark; /* firwall mark of service */ + + /* number of real servers */ + unsigned int num_dests; + char srange[256]; + char drange[256]; + char iifname[IFNAMSIZ]; + char oifname[IFNAMSIZ]; + + /* the real servers */ + struct ip_vs_dest_entry_app entrytable[0]; + } user; + + uint16_t af; + union nf_inet_addr nf_addr; }; /* The argument to IP_VS_SO_GET_SERVICES */ struct ip_vs_get_services_app { - struct { - /* number of virtual services */ - unsigned int num_services; + struct { + /* number of virtual services */ + unsigned int num_services; - /* service table */ - struct ip_vs_service_entry_app entrytable[0]; - } user; + /* service table */ + struct ip_vs_service_entry_app entrytable[0]; + } user; }; /* Make sure we don't have an inconsistent definition */ #if IP_VS_IFNAME_MAXLEN > IFNAMSIZ - #error The code assumes that IP_VS_IFNAME_MAXLEN <= IFNAMSIZ + #error The code assumes that IP_VS_IFNAME_MAXLEN <= IFNAMSIZ #endif /* The argument to IP_VS_SO_GET_DAEMON */ struct ip_vs_daemon_kern { - /* sync daemon state (master/backup) */ - int state; + /* sync daemon state (master/backup) */ + int state; - /* multicast interface name */ - char mcast_ifn[IP_VS_IFNAME_MAXLEN]; + /* multicast interface name */ + char mcast_ifn[IP_VS_IFNAME_MAXLEN]; - /* SyncID we belong to */ - int syncid; + /* SyncID we belong to */ + int syncid; }; struct ip_vs_daemon_app { - /* sync daemon state (master/backup) */ - int state; + /* sync daemon state (master/backup) */ + int state; - /* multicast interface name */ - char mcast_ifn[IP_VS_IFNAME_MAXLEN]; + /* multicast interface name */ + char mcast_ifn[IP_VS_IFNAME_MAXLEN]; - /* SyncID we belong to */ - int syncid; + /* SyncID we belong to */ + int syncid; #ifdef _HAVE_IPVS_SYNCD_ATTRIBUTES_ - /* UDP Payload Size */ - uint16_t sync_maxlen; + /* UDP Payload Size */ + uint16_t sync_maxlen; - /* Multicast Port (base) */ 
- uint16_t mcast_port; + /* Multicast Port (base) */ + uint16_t mcast_port; - /* Multicast TTL */ - uint8_t mcast_ttl; + /* Multicast TTL */ + uint8_t mcast_ttl; - /* Multicast Address Family */ - uint16_t mcast_af; + /* Multicast Address Family */ + uint16_t mcast_af; - /* Multicast Address */ - union nf_inet_addr mcast_group; + /* Multicast Address */ + union nf_inet_addr mcast_group; #endif }; -#endif /* KEEPALIVED_IP_VS_H */ +#endif /* KEEPALIVED_IP_VS_H */ diff --git a/tools/keepalived/keepalived/include/libipvs.h b/tools/keepalived/keepalived/include/libipvs.h index 0e0b3cdc1..80a2cb105 100644 --- a/tools/keepalived/keepalived/include/libipvs.h +++ b/tools/keepalived/keepalived/include/libipvs.h @@ -78,6 +78,7 @@ typedef struct ip_vs_tunnel_user ipvs_tunnel_t; typedef struct ip_vs_laddr_entry ipvs_laddr_entry_t; typedef struct ip_vs_blklst_entry ipvs_blklst_entry_t; typedef struct ip_vs_whtlst_entry ipvs_whtlst_entry_t; +typedef struct ip_vs_stats_user ip_vs_stats_t; /* init socket and get ipvs info */ From c984c03b5b2017261166d5386ebf7ebea0ed2fd9 Mon Sep 17 00:00:00 2001 From: Chion Deng Date: Wed, 20 Jan 2021 18:24:10 +0800 Subject: [PATCH 11/35] feature: send TCP RST if syn cookie check fails --- src/ipvs/ip_vs_synproxy.c | 186 +++++++++++++++++++++++++++++++++++++- 1 file changed, 185 insertions(+), 1 deletion(-) diff --git a/src/ipvs/ip_vs_synproxy.c b/src/ipvs/ip_vs_synproxy.c index dabd19ad1..397618343 100644 --- a/src/ipvs/ip_vs_synproxy.c +++ b/src/ipvs/ip_vs_synproxy.c @@ -947,6 +947,186 @@ static int syn_proxy_send_rs_syn(int af, const struct tcphdr *th, return EDPVS_OK; } +/* Reuse mbuf and construct TCP RST packet */ +static int syn_proxy_build_tcp_rst(int af, struct rte_mbuf *mbuf, + void *iph, struct tcphdr *th, + uint32_t l3_len, uint32_t l4_len) +{ + struct netif_port *dev; + uint16_t tmpport; + uint16_t tcph_len, payload_len; + struct iphdr *ip4h; + struct ip6_hdr *ip6h; + uint32_t seq; + + if (unlikely(l4_len < sizeof(struct tcphdr))) + 
return EDPVS_INVPKT; + + tcph_len = th->doff * 4; + + if (unlikely(l4_len < tcph_len)) + return EDPVS_INVPKT; + + payload_len = l4_len - tcph_len; + + /* set tx offload flags */ + dev = netif_port_get(mbuf->port); + if (unlikely(!dev)) { + RTE_LOG(ERR, IPVS, "%s: device port %d not found\n", + __func__, mbuf->port); + return EDPVS_NOTEXIST; + } + if (likely(dev && (dev->flag & NETIF_PORT_FLAG_TX_TCP_CSUM_OFFLOAD))) { + if (af == AF_INET6) + mbuf->ol_flags |= (PKT_TX_TCP_CKSUM | PKT_TX_IPV6); + else + mbuf->ol_flags |= (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM | PKT_TX_IPV4); + } + + /* exchange ports */ + tmpport = th->dest; + th->dest = th->source; + th->source = tmpport; + /* set window size to zero */ + th->window = 0; + /* set seq and ack_seq */ + seq = th->ack_seq; + if (th->syn) + th->ack_seq = htonl(ntohl(th->seq) + 1); + else + th->ack_seq = htonl(ntohl(th->seq) + payload_len); + th->seq = seq; + /* set TCP flags */ + th->fin = 0; + th->syn = 0; + th->rst = 1; + th->psh = 0; + th->ack = 1; + + /* truncate packet if TCP payload presents */ + if (payload_len > 0) { + if (rte_pktmbuf_trim(mbuf, payload_len) != 0) { + return EDPVS_INVPKT; + } + l4_len -= payload_len; + } + + if (AF_INET6 == af) { + struct in6_addr tmpaddr; + ip6h = iph; + + tmpaddr = ip6h->ip6_src; + ip6h->ip6_src = ip6h->ip6_dst; + ip6h->ip6_dst = tmpaddr; + ip6h->ip6_hlim = 63; + ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - payload_len); + + /* compute checksum */ + if (likely(mbuf->ol_flags & PKT_TX_TCP_CKSUM)) { + mbuf->l3_len = l3_len; + mbuf->l4_len = l4_len; + th->check = ip6_phdr_cksum(ip6h, mbuf->ol_flags, mbuf->l3_len, IPPROTO_TCP); + } else { + if (mbuf_may_pull(mbuf, mbuf->pkt_len) != 0) + return EDPVS_INVPKT; + tcp6_send_csum((struct ipv6_hdr*)ip6h, th); + } + } else { + uint32_t tmpaddr; + ip4h = iph; + + tmpaddr = ip4h->saddr; + ip4h->saddr = ip4h->daddr; + ip4h->daddr = tmpaddr; + ip4h->ttl = 63; + ip4h->tot_len = htons(ntohs(ip4h->tot_len) - payload_len); + ip4h->tos = 0; + + /* 
compute checksum */ + if (likely(mbuf->ol_flags & PKT_TX_TCP_CKSUM)) { + mbuf->l3_len = l3_len; + mbuf->l4_len = l4_len; + th->check = ip4_phdr_cksum((struct ipv4_hdr*)ip4h, mbuf->ol_flags); + } else { + if (mbuf_may_pull(mbuf, mbuf->pkt_len) != 0) + return EDPVS_INVPKT; + tcp4_send_csum((struct ipv4_hdr*)ip4h, th); + } + + if (likely(mbuf->ol_flags & PKT_TX_IP_CKSUM)) + ip4h->check = 0; + else + ip4_send_csum((struct ipv4_hdr*)ip4h); + } + + return EDPVS_OK; +} + +/* Send TCP RST to client before conn is established. + * mbuf is consumed if EDPVS_OK is returned. */ +static int syn_proxy_send_tcp_rst(int af, struct rte_mbuf *mbuf) +{ + struct tcphdr *th; + struct netif_port *dev; + struct ether_hdr *eth; + struct ether_addr ethaddr; + uint32_t l3_len, l4_len; + void *l3_hdr; + + th = tcp_hdr(mbuf); + if (unlikely(!th)) + return EDPVS_INVPKT; + + if (AF_INET6 == af) { + l3_hdr = ip6_hdr(mbuf); + } else { + l3_hdr = ip4_hdr(mbuf); + } + + l3_len = (void *) th - l3_hdr; + + l4_len = mbuf->pkt_len - l3_len; + + if (unlikely(l4_len < sizeof(struct tcphdr) + || mbuf_may_pull(mbuf, mbuf->pkt_len) != 0)) { + return EDPVS_INVPKT; + } + + if (EDPVS_OK != syn_proxy_build_tcp_rst(af, mbuf, l3_hdr, + th, l3_len, l4_len)) + return EDPVS_INVPKT; + + if (mbuf->l2_len < sizeof(struct ether_hdr)) + return EDPVS_INVPKT; + /* set L2 header and send the packet out + * It is noted that "ipv4_xmit" should not used here, + * because mbuf is reused. 
*/ + eth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, mbuf->l2_len); + if (unlikely(!eth)) { + RTE_LOG(ERR, IPVS, "%s: no memory\n", __func__); + return EDPVS_NOMEM; + } + memcpy(ðaddr, ð->s_addr, sizeof(struct ether_addr)); + memcpy(ð->s_addr, ð->d_addr, sizeof(struct ether_addr)); + memcpy(ð->d_addr, ðaddr, sizeof(struct ether_addr)); + + dev = netif_port_get(mbuf->port); + if (unlikely(!dev)) { + RTE_LOG(ERR, IPVS, "%s: device port %d not found\n", + __func__, mbuf->port); + return EDPVS_NOTEXIST; + } + if (unlikely(EDPVS_OK != netif_xmit(mbuf, dev))) { + RTE_LOG(ERR, IPVS, "%s: netif_xmit failed\n", + __func__); + /* should not set verdict to INET_DROP since netif_xmit + * always consume the mbuf while INET_DROP means mbuf'll + * be free in INET_HOOK.*/ + } + + return EDPVS_OK; +} + /* Syn-proxy step 2 logic: receive client's Ack * Receive client's 3-handshakes ack packet, do cookie check and then * send syn to rs after creating a session */ @@ -986,7 +1166,11 @@ int dp_vs_synproxy_ack_rcv(int af, struct rte_mbuf *mbuf, /* Cookie check failed, drop the packet */ RTE_LOG(DEBUG, IPVS, "%s: syn_cookie check failed seq=%u\n", __func__, ntohl(th->ack_seq) - 1); - *verdict = INET_DROP; + if (EDPVS_OK == syn_proxy_send_tcp_rst(af, mbuf)) { + *verdict = INET_STOLEN; + } else { + *verdict = INET_DROP; + } return 0; } From 7bf39b2f7d26db7d63e78c2d0573916c6fa3e575 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Thu, 4 Feb 2021 15:43:30 +0800 Subject: [PATCH 12/35] obsolete patches dpdk version of 16.07,17.05.02 --- ...link-event-for-multicast-driver-part.patch | 107 ----------------- ...tlink-event-for-multicast-driver-par.patch | 108 ------------------ ...iable-IP-header-len-for-checksum-API.patch | 48 -------- 3 files changed, 263 deletions(-) delete mode 100644 patch/dpdk-16.07/0001-kni-use-netlink-event-for-multicast-driver-part.patch delete mode 100644 patch/dpdk-stable-17.05.2/0001-PATCH-kni-use-netlink-event-for-multicast-driver-par.patch delete mode 100644 
patch/dpdk-stable-17.05.2/0002-net-support-variable-IP-header-len-for-checksum-API.patch diff --git a/patch/dpdk-16.07/0001-kni-use-netlink-event-for-multicast-driver-part.patch b/patch/dpdk-16.07/0001-kni-use-netlink-event-for-multicast-driver-part.patch deleted file mode 100644 index f0361a59b..000000000 --- a/patch/dpdk-16.07/0001-kni-use-netlink-event-for-multicast-driver-part.patch +++ /dev/null @@ -1,107 +0,0 @@ -From 8d6d303be80c246cf0c92b143432aede255c9238 Mon Sep 17 00:00:00 2001 -From: Lei Chen -Date: Fri, 3 Mar 2017 14:49:17 +0800 -Subject: [PATCH] kni: use netlink event for multicast (driver part). - -kni driver send netlink event every time hw-multicast list updated by -kernel, the user kni app should capture the event and update multicast -to kni device. - -original way is using rte_kni_request to pass hw-multicast to user kni -module. that method works but finally memory corruption found, which is -not easy to address. ---- - lib/librte_eal/linuxapp/kni/kni_net.c | 67 +++++++++++++++++++++++++++++++++++ - 1 file changed, 67 insertions(+) - -diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c -index fc82193..952e9b8 100644 ---- a/lib/librte_eal/linuxapp/kni/kni_net.c -+++ b/lib/librte_eal/linuxapp/kni/kni_net.c -@@ -35,6 +35,8 @@ - #include - #include - #include -+#include -+#include - - #include - #include -@@ -527,9 +529,74 @@ kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) - return 0; - } - -+static size_t kni_nlmsg_size(void) -+{ -+ return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) -+ + nla_total_size(4) /* IFA_ADDRESS */ -+ + nla_total_size(4) /* IFA_LOCAL */ -+ + nla_total_size(4) /* IFA_BROADCAST */ -+ + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ -+ + nla_total_size(4) /* IFA_FLAGS */ -+ + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */ -+} -+ - static void - kni_net_set_rx_mode(struct net_device *dev) - { -+ /* -+ * send event to notify user (DPDK KNI app) that multicast list 
changed, -+ * so that it can monitor multicast join/leave and set HW mc-addrs to -+ * kni dev accordinglly. -+ * -+ * this event is just an notification, we do not save any mc-addr here -+ * (so attribute space for us). user kni app should get maddrs after -+ * receive this notification. -+ * -+ * I was expecting kernel send some rtnl event for multicast join/leave, -+ * but it doesn't. By checking the call-chain of SIOCADDMULTI (ip maddr, -+ * manages only hardware multicast) and IP_ADD_MEMBERSHIP (ip_mc_join_group, -+ * used to for IPv4 multicast), no rtnl event sent. -+ * -+ * so as workaround, modify kni driver here to send RTM_NEWADDR. -+ * it may not suitalbe to use this event for mcast, but that should works. -+ * hope that won't affect other listener to this event. -+ * -+ * previous solution was using rte_kni_request to pass hw-maddr list to user. -+ * it "works" for times but finally memory corruption found, which is -+ * not easy to address (lock was added and reviewed). That's why we use -+ * netlink event instead. -+ */ -+ struct sk_buff *skb; -+ struct net *net = dev_net(dev); -+ struct nlmsghdr *nlh; -+ struct ifaddrmsg *ifm; -+ -+ skb = nlmsg_new(kni_nlmsg_size(), GFP_KERNEL); -+ if (!skb) -+ return; -+ -+ /* no other event for us ? */ -+ nlh = nlmsg_put(skb, 0, 0, RTM_NEWADDR, sizeof(*ifm), 0); -+ if (!nlh) { -+ kfree_skb(skb); -+ return; -+ } -+ -+ /* just send an notification so no other info */ -+ ifm = nlmsg_data(nlh); -+ memset(ifm, 0, sizeof(*ifm)); -+ ifm->ifa_family = AF_UNSPEC; -+ ifm->ifa_prefixlen = 0; -+ ifm->ifa_flags = 0; -+ ifm->ifa_scope = RT_SCOPE_NOWHERE; -+ ifm->ifa_index = 0; -+ -+ nlmsg_end(skb, nlh); -+ -+ /* other group ? 
*/ -+ KNI_DBG("%s: rx-mode/multicast-list changed\n", __func__); -+ rtnl_notify(skb, net, 0, RTNLGRP_NOTIFY, NULL, GFP_KERNEL); -+ return; - } - - static int --- -2.7.4 - diff --git a/patch/dpdk-stable-17.05.2/0001-PATCH-kni-use-netlink-event-for-multicast-driver-par.patch b/patch/dpdk-stable-17.05.2/0001-PATCH-kni-use-netlink-event-for-multicast-driver-par.patch deleted file mode 100644 index 9c37cb46d..000000000 --- a/patch/dpdk-stable-17.05.2/0001-PATCH-kni-use-netlink-event-for-multicast-driver-par.patch +++ /dev/null @@ -1,108 +0,0 @@ -From b5843bda351920c27be5e8211ef6fa5d548fa03e Mon Sep 17 00:00:00 2001 -From: Lei Chen -Date: Tue, 23 Jan 2018 12:39:56 +0800 -Subject: [PATCH] kni: use netlink event for multicast (driver part). - -kni driver send netlink event every time hw-multicast list updated by -kernel, the user kni app should capture the event and update multicast -to kni device. - -original way is using rte_kni_request to pass hw-multicast to user kni -module. that method works but finally memory corruption found, which is -not easy to address. 
---- - lib/librte_eal/linuxapp/kni/kni_net.c | 68 +++++++++++++++++++++++++++++++++++ - 1 file changed, 68 insertions(+) - -diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c -index db9f489..fab94d1 100644 ---- a/lib/librte_eal/linuxapp/kni/kni_net.c -+++ b/lib/librte_eal/linuxapp/kni/kni_net.c -@@ -35,6 +35,8 @@ - #include - #include - #include -+#include -+#include - - #include - #include -@@ -579,9 +581,75 @@ kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) - return 0; - } - -+static size_t -+kni_nlmsg_size(void) -+{ -+ return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) -+ + nla_total_size(4) /* IFA_ADDRESS */ -+ + nla_total_size(4) /* IFA_LOCAL */ -+ + nla_total_size(4) /* IFA_BROADCAST */ -+ + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ -+ + nla_total_size(4) /* IFA_FLAGS */ -+ + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */ -+} -+ - static void - kni_net_set_rx_mode(struct net_device *dev) - { -+ /* -+ * send event to notify user (DPDK KNI app) that multicast list changed, -+ * so that it can monitor multicast join/leave and set HW mc-addrs to -+ * kni dev accordinglly. -+ * -+ * this event is just an notification, we do not save any mc-addr here -+ * (so attribute space for us). user kni app should get maddrs after -+ * receive this notification. -+ * -+ * I was expecting kernel send some rtnl event for multicast join/leave, -+ * but it doesn't. By checking the call-chain of SIOCADDMULTI (ip maddr, -+ * manages only hardware multicast) and IP_ADD_MEMBERSHIP (ip_mc_join_group, -+ * used to for IPv4 multicast), no rtnl event sent. -+ * -+ * so as workaround, modify kni driver here to send RTM_NEWADDR. -+ * it may not suitalbe to use this event for mcast, but that should works. -+ * hope that won't affect other listener to this event. -+ * -+ * previous solution was using rte_kni_request to pass hw-maddr list to user. 
-+ * it "works" for times but finally memory corruption found, which is -+ * not easy to address (lock was added and reviewed). That's why we use -+ * netlink event instead. -+ */ -+ struct sk_buff *skb; -+ struct net *net = dev_net(dev); -+ struct nlmsghdr *nlh; -+ struct ifaddrmsg *ifm; -+ -+ skb = nlmsg_new(kni_nlmsg_size(), GFP_ATOMIC); -+ if (!skb) -+ return; -+ -+ /* no other event for us ? */ -+ nlh = nlmsg_put(skb, 0, 0, RTM_NEWADDR, sizeof(*ifm), 0); -+ if (!nlh) { -+ kfree_skb(skb); -+ return; -+ } -+ -+ /* just send an notification so no other info */ -+ ifm = nlmsg_data(nlh); -+ memset(ifm, 0, sizeof(*ifm)); -+ ifm->ifa_family = AF_UNSPEC; -+ ifm->ifa_prefixlen = 0; -+ ifm->ifa_flags = 0; -+ ifm->ifa_scope = RT_SCOPE_NOWHERE; -+ ifm->ifa_index = 0; -+ -+ nlmsg_end(skb, nlh); -+ -+ /* other group ? */ -+ pr_debug("%s: rx-mode/multicast-list changed\n", __func__); -+ rtnl_notify(skb, net, 0, RTNLGRP_NOTIFY, NULL, GFP_ATOMIC); -+ return; - } - - static int --- -2.7.4 - diff --git a/patch/dpdk-stable-17.05.2/0002-net-support-variable-IP-header-len-for-checksum-API.patch b/patch/dpdk-stable-17.05.2/0002-net-support-variable-IP-header-len-for-checksum-API.patch deleted file mode 100644 index 927827a29..000000000 --- a/patch/dpdk-stable-17.05.2/0002-net-support-variable-IP-header-len-for-checksum-API.patch +++ /dev/null @@ -1,48 +0,0 @@ -From c2dfa35bbd68b869f9069ca9b7474dd93e2097ef Mon Sep 17 00:00:00 2001 -From: Lei Chen -Date: Tue, 6 Mar 2018 16:04:36 +0800 -Subject: [PATCH 2/2] net: support variable IP header len for checksum API. - -IPv4 checksum APIs use fixe IP header length, it will failed if there is -any IP option. Now calculating header length by "ihl" field, so that we -can support options. 
- -Signed-off-by: Lei Chen ---- - lib/librte_net/rte_ip.h | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/lib/librte_net/rte_ip.h b/lib/librte_net/rte_ip.h -index 4491b86..cfbc3bd 100644 ---- a/lib/librte_net/rte_ip.h -+++ b/lib/librte_net/rte_ip.h -@@ -314,7 +314,7 @@ static inline uint16_t - rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr) - { - uint16_t cksum; -- cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr)); -+ cksum = rte_raw_cksum(ipv4_hdr, (ipv4_hdr->version_ihl & 0xf) * 4); - return (cksum == 0xffff) ? cksum : ~cksum; - } - -@@ -356,7 +356,7 @@ rte_ipv4_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, uint64_t ol_flags) - } else { - psd_hdr.len = rte_cpu_to_be_16( - (uint16_t)(rte_be_to_cpu_16(ipv4_hdr->total_length) -- - sizeof(struct ipv4_hdr))); -+ - (ipv4_hdr->version_ihl & 0xf) * 4)); - } - return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr)); - } -@@ -381,7 +381,7 @@ rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr) - uint32_t l4_len; - - l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - -- sizeof(struct ipv4_hdr); -+ (ipv4_hdr->version_ihl & 0xf) * 4; - - cksum = rte_raw_cksum(l4_hdr, l4_len); - cksum += rte_ipv4_phdr_cksum(ipv4_hdr, 0); --- -2.7.4 - From aeefddae20c4d803c81a0ad1693411a254094fb7 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Thu, 4 Feb 2021 16:45:21 +0800 Subject: [PATCH 13/35] ipv4: remove superfluous ipv4 checksum functions Checksum problem for packets with ipv4 options is fixed by DPDK patch "net-support-variable-IP-header-len-for-checksum-API.patch" in dpvs patch directory. 
--- include/ipv4.h | 55 -------------------------------------- src/ipvs/ip_vs_proto_tcp.c | 4 +-- src/ipvs/ip_vs_proto_udp.c | 4 +-- src/ipvs/ip_vs_synproxy.c | 2 +- 4 files changed, 5 insertions(+), 60 deletions(-) diff --git a/include/ipv4.h b/include/ipv4.h index 17bd6037a..059cc859b 100644 --- a/include/ipv4.h +++ b/include/ipv4.h @@ -144,59 +144,4 @@ static inline bool ip4_is_frag(struct ipv4_hdr *iph) & htons(IPV4_HDR_MF_FLAG | IPV4_HDR_OFFSET_MASK)) != 0; } -/* - * Process the pseudo-header checksum of an IPv4 header. - * - * Different from "rte_ipv4_phdr_cksum", "ip4_phdr_cksum" allows for ipv4 options. - * The checksum field must be set to 0 by the caller. - * - * @param iph - * The pointer to the contiguous IPv4 header. - * @param ol_flags - * The ol_flags of the associated mbuf. - * @return - * The non-complemented pseudo checksum to set in the L4 header. - */ -static inline uint16_t ip4_phdr_cksum(struct ipv4_hdr *iph, uint64_t ol_flags) -{ - uint16_t csum; - uint16_t total_length = iph->total_length; - - iph->total_length = htons(ntohs(total_length) - - ((iph->version_ihl & 0xf) << 2) + sizeof(struct ipv4_hdr)); - csum = rte_ipv4_phdr_cksum(iph, ol_flags); - - iph->total_length = total_length; - return csum; -} - -/* - * Process the IPv4 UDP or TCP checksum. - * - * Different from "rte_ipv4_udptcp_cksum", "ip4_udptcp_cksum" allows for ipv4 options. - * The IP and layer 4 checksum must be set to 0 in the packet by the caller. - * - * @param iph - * The pointer to the contiguous IPv4 header. - * @param l4_hdr - * The pointer to the beginning of the L4 header. - * @return - * The complemented checksum to set in the L4 header. 
- */ -static inline uint16_t ip4_udptcp_cksum(struct ipv4_hdr *iph, const void *l4_hdr) -{ - uint16_t csum; - uint16_t total_length = iph->total_length; - uint8_t version_ihl = iph->version_ihl; - - iph->total_length = htons(ntohs(total_length) - - ((iph->version_ihl & 0xf) << 2) + sizeof(struct ipv4_hdr)); - iph->version_ihl = (version_ihl & 0xf0) | (sizeof(struct ipv4_hdr) >> 2); - csum = rte_ipv4_udptcp_cksum(iph, l4_hdr); - - iph->total_length = total_length; - iph->version_ihl = version_ihl; - return csum; -} - #endif /* __DPVS_IPV4_H__ */ diff --git a/src/ipvs/ip_vs_proto_tcp.c b/src/ipvs/ip_vs_proto_tcp.c index c632086dc..6ff8c7f4e 100644 --- a/src/ipvs/ip_vs_proto_tcp.c +++ b/src/ipvs/ip_vs_proto_tcp.c @@ -142,7 +142,7 @@ inline struct tcphdr *tcp_hdr(const struct rte_mbuf *mbuf) inline void tcp4_send_csum(struct ipv4_hdr *iph, struct tcphdr *th) { th->check = 0; - th->check = ip4_udptcp_cksum(iph, th); + th->check = rte_ipv4_udptcp_cksum(iph, th); } /* @@ -192,7 +192,7 @@ static inline int tcp_send_csum(int af, int iphdrlen, struct tcphdr *th, mbuf->l4_len = ntohs(iph->total_length) - iphdrlen; mbuf->l3_len = iphdrlen; mbuf->ol_flags |= (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM | PKT_TX_IPV4); - th->check = ip4_phdr_cksum(iph, mbuf->ol_flags); + th->check = rte_ipv4_phdr_cksum(iph, mbuf->ol_flags); } else { if (mbuf_may_pull(mbuf, mbuf->pkt_len) != 0) return EDPVS_INVPKT; diff --git a/src/ipvs/ip_vs_proto_udp.c b/src/ipvs/ip_vs_proto_udp.c index 5376e3a99..998ee608c 100644 --- a/src/ipvs/ip_vs_proto_udp.c +++ b/src/ipvs/ip_vs_proto_udp.c @@ -66,7 +66,7 @@ static int udp_timeouts[DPVS_UDP_S_LAST + 1] = { inline void udp4_send_csum(struct ipv4_hdr *iph, struct udp_hdr *uh) { uh->dgram_cksum = 0; - uh->dgram_cksum = ip4_udptcp_cksum(iph, uh); + uh->dgram_cksum = rte_ipv4_udptcp_cksum(iph, uh); } inline void udp6_send_csum(struct ipv6_hdr *iph, struct udp_hdr *uh) @@ -132,7 +132,7 @@ static inline int udp_send_csum(int af, int iphdrlen, struct udp_hdr *uh, 
mbuf->l3_len = iphdrlen; mbuf->l4_len = ntohs(iph->total_length) - iphdrlen; mbuf->ol_flags |= (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | PKT_TX_IPV4); - uh->dgram_cksum = ip4_phdr_cksum(iph, mbuf->ol_flags); + uh->dgram_cksum = rte_ipv4_phdr_cksum(iph, mbuf->ol_flags); } else { if (mbuf_may_pull(mbuf, mbuf->pkt_len) != 0) return EDPVS_INVPKT; diff --git a/src/ipvs/ip_vs_synproxy.c b/src/ipvs/ip_vs_synproxy.c index 397618343..db19275c9 100644 --- a/src/ipvs/ip_vs_synproxy.c +++ b/src/ipvs/ip_vs_synproxy.c @@ -649,7 +649,7 @@ static void syn_proxy_reuse_mbuf(int af, struct rte_mbuf *mbuf, if (likely(mbuf->ol_flags & PKT_TX_TCP_CKSUM)) { mbuf->l3_len = iphlen; mbuf->l4_len = ntohs(iph->tot_len) - iphlen; - th->check = ip4_phdr_cksum((struct ipv4_hdr*)iph, mbuf->ol_flags); + th->check = rte_ipv4_phdr_cksum((struct ipv4_hdr*)iph, mbuf->ol_flags); } else { if (mbuf_may_pull(mbuf, mbuf->pkt_len) != 0) return; From c14ef3a9ab2149a88f2fc8cdec17544f237ac44c Mon Sep 17 00:00:00 2001 From: ywc689 Date: Thu, 4 Feb 2021 20:10:57 +0800 Subject: [PATCH 14/35] ipvs: rectify mbuf::l4_len for l4 checksum offload --- src/ipvs/ip_vs_proto_tcp.c | 4 ++-- src/ipvs/ip_vs_proto_udp.c | 4 ++-- src/ipvs/ip_vs_synproxy.c | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/ipvs/ip_vs_proto_tcp.c b/src/ipvs/ip_vs_proto_tcp.c index 6ff8c7f4e..361d435b0 100644 --- a/src/ipvs/ip_vs_proto_tcp.c +++ b/src/ipvs/ip_vs_proto_tcp.c @@ -173,7 +173,7 @@ static inline int tcp_send_csum(int af, int iphdrlen, struct tcphdr *th, dev = conn->out_dev; if (likely(dev && (dev->flag & NETIF_PORT_FLAG_TX_TCP_CSUM_OFFLOAD))) { mbuf->l3_len = iphdrlen; - mbuf->l4_len = ntohs(ip6h->ip6_plen) + sizeof(struct ip6_hdr) - iphdrlen; + mbuf->l4_len = (th->doff << 2); mbuf->ol_flags |= (PKT_TX_TCP_CKSUM | PKT_TX_IPV6); th->check = ip6_phdr_cksum(ip6h, mbuf->ol_flags, iphdrlen, IPPROTO_TCP); } else { @@ -189,8 +189,8 @@ static inline int tcp_send_csum(int af, int iphdrlen, struct tcphdr *th, else if 
(conn->out_dev) dev = conn->out_dev; if (likely(dev && (dev->flag & NETIF_PORT_FLAG_TX_TCP_CSUM_OFFLOAD))) { - mbuf->l4_len = ntohs(iph->total_length) - iphdrlen; mbuf->l3_len = iphdrlen; + mbuf->l4_len = (th->doff << 2); mbuf->ol_flags |= (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM | PKT_TX_IPV4); th->check = rte_ipv4_phdr_cksum(iph, mbuf->ol_flags); } else { diff --git a/src/ipvs/ip_vs_proto_udp.c b/src/ipvs/ip_vs_proto_udp.c index 998ee608c..e098d2774 100644 --- a/src/ipvs/ip_vs_proto_udp.c +++ b/src/ipvs/ip_vs_proto_udp.c @@ -97,7 +97,7 @@ static inline int udp_send_csum(int af, int iphdrlen, struct udp_hdr *uh, dev = conn->out_dev; if (likely(dev && (dev->flag & NETIF_PORT_FLAG_TX_UDP_CSUM_OFFLOAD))) { mbuf->l3_len = iphdrlen; - mbuf->l4_len = ntohs(ip6h->ip6_plen) + sizeof(struct ip6_hdr) -iphdrlen; + mbuf->l4_len = sizeof(struct udp_hdr); mbuf->ol_flags |= (PKT_TX_UDP_CKSUM | PKT_TX_IPV6); uh->dgram_cksum = ip6_phdr_cksum(ip6h, mbuf->ol_flags, iphdrlen, IPPROTO_UDP); @@ -130,7 +130,7 @@ static inline int udp_send_csum(int af, int iphdrlen, struct udp_hdr *uh, dev = conn->out_dev; if (likely(dev && (dev->flag & NETIF_PORT_FLAG_TX_UDP_CSUM_OFFLOAD))) { mbuf->l3_len = iphdrlen; - mbuf->l4_len = ntohs(iph->total_length) - iphdrlen; + mbuf->l4_len = sizeof(struct udp_hdr); mbuf->ol_flags |= (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | PKT_TX_IPV4); uh->dgram_cksum = rte_ipv4_phdr_cksum(iph, mbuf->ol_flags); } else { diff --git a/src/ipvs/ip_vs_synproxy.c b/src/ipvs/ip_vs_synproxy.c index db19275c9..589a4d5a8 100644 --- a/src/ipvs/ip_vs_synproxy.c +++ b/src/ipvs/ip_vs_synproxy.c @@ -628,7 +628,7 @@ static void syn_proxy_reuse_mbuf(int af, struct rte_mbuf *mbuf, if (likely(mbuf->ol_flags & PKT_TX_TCP_CKSUM)) { mbuf->l3_len = (void *)th - (void *)ip6h; - mbuf->l4_len = ntohs(ip6h->ip6_plen) + sizeof(struct ip6_hdr) - mbuf->l3_len; + mbuf->l4_len = (th->doff << 2); th->check = ip6_phdr_cksum(ip6h, mbuf->ol_flags, mbuf->l3_len, IPPROTO_TCP); } else { if (mbuf_may_pull(mbuf, 
mbuf->pkt_len) != 0) @@ -648,7 +648,7 @@ static void syn_proxy_reuse_mbuf(int af, struct rte_mbuf *mbuf, /* compute checksum */ if (likely(mbuf->ol_flags & PKT_TX_TCP_CKSUM)) { mbuf->l3_len = iphlen; - mbuf->l4_len = ntohs(iph->tot_len) - iphlen; + mbuf->l4_len = (th->doff << 2); th->check = rte_ipv4_phdr_cksum((struct ipv4_hdr*)iph, mbuf->ol_flags); } else { if (mbuf_may_pull(mbuf, mbuf->pkt_len) != 0) @@ -1046,7 +1046,7 @@ static int syn_proxy_build_tcp_rst(int af, struct rte_mbuf *mbuf, if (likely(mbuf->ol_flags & PKT_TX_TCP_CKSUM)) { mbuf->l3_len = l3_len; mbuf->l4_len = l4_len; - th->check = ip4_phdr_cksum((struct ipv4_hdr*)ip4h, mbuf->ol_flags); + th->check = rte_ipv4_phdr_cksum((struct ipv4_hdr*)ip4h, mbuf->ol_flags); } else { if (mbuf_may_pull(mbuf, mbuf->pkt_len) != 0) return EDPVS_INVPKT; From 4c595b5a7b67af250dce6b1ced871c5977b0250e Mon Sep 17 00:00:00 2001 From: zhangtengfei-oppo Date: Fri, 19 Feb 2021 17:18:38 +0800 Subject: [PATCH 15/35] support mtu config --- conf/dpvs.conf.sample | 2 ++ src/netif.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/conf/dpvs.conf.sample b/conf/dpvs.conf.sample index 5b7d9df64..74e2910f4 100644 --- a/conf/dpvs.conf.sample +++ b/conf/dpvs.conf.sample @@ -40,6 +40,7 @@ netif_defs { } ! promisc_mode kni_name dpdk0.kni + mtu 1500 } device dpdk1 { @@ -59,6 +60,7 @@ netif_defs { } ! promisc_mode kni_name dpdk1.kni + mtu 1500 } ! 
bonding bond0 { diff --git a/src/netif.c b/src/netif.c index e839cdcfe..22df3aeed 100644 --- a/src/netif.c +++ b/src/netif.c @@ -88,6 +88,7 @@ struct port_conf_stream { int rx_queue_nb; int rx_desc_nb; char rss[32]; + int mtu; int tx_queue_nb; int tx_desc_nb; @@ -509,6 +510,28 @@ static void promisc_mode_handler(vector_t tokens) current_device->promisc_mode = true; } +static void custom_mtu_handler(vector_t tokens) +{ + char *str = set_value(tokens); + int mtu = 0; + struct port_conf_stream *current_device = list_entry(port_list.next, + struct port_conf_stream, port_list_node); + + assert(str); + mtu = atoi(str); + if (mtu <= 0 || mtu > NETIF_MAX_ETH_MTU) { + RTE_LOG(WARNING, NETIF, "invalid %s:MTU %s, using default %d\n", + current_device->name, str, NETIF_DEFAULT_ETH_MTU); + current_device->mtu= NETIF_DEFAULT_ETH_MTU; + } else { + RTE_LOG(INFO, NETIF, "%s:mtu = %d\n", + current_device->name, mtu); + current_device->mtu = mtu; + } + + FREE_PTR(str); + +} static void kni_name_handler(vector_t tokens) { char *str = set_value(tokens); @@ -877,6 +900,7 @@ void install_netif_keywords(void) install_keyword("filter", fdir_filter_handler, KW_TYPE_INIT); install_sublevel_end(); install_keyword("promisc_mode", promisc_mode_handler, KW_TYPE_INIT); + install_keyword("mtu", custom_mtu_handler,KW_TYPE_INIT); install_keyword("kni_name", kni_name_handler, KW_TYPE_INIT); install_sublevel_end(); install_keyword("bonding", bonding_handler, KW_TYPE_INIT); @@ -3516,6 +3540,8 @@ static void fill_port_config(struct netif_port *port, char *promisc_on) port->dev_conf.fdir_conf.mode = cfg_stream->fdir_mode; port->dev_conf.fdir_conf.pballoc = cfg_stream->fdir_pballoc; port->dev_conf.fdir_conf.status = cfg_stream->fdir_status; + if(cfg_stream->mtu != 0) + port->mtu = cfg_stream->mtu; if (cfg_stream->rx_queue_nb > 0 && port->nrxq > cfg_stream->rx_queue_nb) { RTE_LOG(WARNING, NETIF, "%s: rx-queues(%d) configured in workers != " @@ -3662,6 +3688,8 @@ int netif_port_start(struct netif_port 
*port) // device configure if ((ret = netif_port_fdir_dstport_mask_set(port)) != EDPVS_OK) return ret; + if ((ret = rte_eth_dev_set_mtu(port->id,port->mtu) != EDPVS_OK) + return ret; if (port->flag & NETIF_PORT_FLAG_TX_IP_CSUM_OFFLOAD) port->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; From 182ffcf8bd72a93989bd80bfacd9d6a3f38a2e7a Mon Sep 17 00:00:00 2001 From: weiyanhua Date: Wed, 27 Jan 2021 11:26:25 +0800 Subject: [PATCH 16/35] fix:Ping failure in snat mode, beacuse sending and receiving cores are different. scenario: (1) Single arm (2) RSS:IP, The source port number of UDP packets is not fixed(only destination port number is fixed),the rss can only choose ip mode (3) dpvs works in snat mode (4) For response packets: tcp/udp packets can be assigned to the fixed core using fdir,but icmp packets cannot use fdir solution: (1) Send the icmp message to the fixed core for forwarding (2) Use "icmp_fwd_core" to specify the core used to forward icmp (3) In actual scenarios, the icmp traffic is relatively small, and has little impact on other traffic --- conf/dpvs.bond.conf.sample | 1 + conf/dpvs.conf.items | 1 + conf/dpvs.conf.sample | 1 + conf/dpvs.conf.single-bond.sample | 1 + conf/dpvs.conf.single-nic.sample | 1 + include/icmp.h | 5 ++ include/ipv4.h | 1 + include/netif.h | 2 +- src/config.mk | 1 + src/icmp.c | 108 ++++++++++++++++++++++++++++++ src/ipv4.c | 17 ++++- src/netif.c | 21 +++++- 12 files changed, 156 insertions(+), 4 deletions(-) diff --git a/conf/dpvs.bond.conf.sample b/conf/dpvs.bond.conf.sample index 5465593b6..ba9de9834 100644 --- a/conf/dpvs.bond.conf.sample +++ b/conf/dpvs.bond.conf.sample @@ -246,6 +246,7 @@ worker_defs { worker cpu8 { type slave cpu_id 8 + icmp_redirect_core port bond0 { rx_queue_ids 7 tx_queue_ids 7 diff --git a/conf/dpvs.conf.items b/conf/dpvs.conf.items index 978490960..5b5592184 100644 --- a/conf/dpvs.conf.items +++ b/conf/dpvs.conf.items @@ -135,6 +135,7 @@ worker_defs { worker cpu5 { type kni cpu_id 5 + 
icmp_redirect_core port dpdk0 { tx_queue_ids 6 } diff --git a/conf/dpvs.conf.sample b/conf/dpvs.conf.sample index 5b7d9df64..cfbd82ee3 100644 --- a/conf/dpvs.conf.sample +++ b/conf/dpvs.conf.sample @@ -199,6 +199,7 @@ worker_defs { worker cpu8 { type slave cpu_id 8 + icmp_redirect_core port dpdk0 { rx_queue_ids 7 tx_queue_ids 7 diff --git a/conf/dpvs.conf.single-bond.sample b/conf/dpvs.conf.single-bond.sample index a1c58ffa4..75b3103de 100644 --- a/conf/dpvs.conf.single-bond.sample +++ b/conf/dpvs.conf.single-bond.sample @@ -156,6 +156,7 @@ worker_defs { worker cpu8 { type slave cpu_id 8 + icmp_redirect_core port bond0 { rx_queue_ids 7 tx_queue_ids 7 diff --git a/conf/dpvs.conf.single-nic.sample b/conf/dpvs.conf.single-nic.sample index ca8e6a8d6..ca12029d0 100644 --- a/conf/dpvs.conf.single-nic.sample +++ b/conf/dpvs.conf.single-nic.sample @@ -129,6 +129,7 @@ worker_defs { worker cpu8 { type slave cpu_id 8 + icmp_redirect_core port dpdk0 { rx_queue_ids 7 tx_queue_ids 7 diff --git a/include/icmp.h b/include/icmp.h index b54d37448..8ff582434 100644 --- a/include/icmp.h +++ b/include/icmp.h @@ -26,4 +26,9 @@ void icmp_send(struct rte_mbuf *imbuf, int type, int code, uint32_t info); #define icmp4_id(icmph) (((icmph)->un).echo.id) +#ifdef CONFIG_ICMP_REDIRECT_CORE +int icmp_recv_proc(struct rte_mbuf *mbuf); +void icmp_redirect_proc(void *args); +extern lcoreid_t g_icmp_redirect_lcore_id; +#endif #endif /* __DPVS_ICMP_H__ */ diff --git a/include/ipv4.h b/include/ipv4.h index 17bd6037a..f024db84b 100644 --- a/include/ipv4.h +++ b/include/ipv4.h @@ -119,6 +119,7 @@ int ip4_defrag(struct rte_mbuf *mbuf, int user); uint32_t ip4_select_id(struct ipv4_hdr *iph); int ipv4_local_out(struct rte_mbuf *mbuf); +int ipv4_rcv_fin(struct rte_mbuf *mbuf); /* helper functions */ static inline struct ipv4_hdr *ip4_hdr(const struct rte_mbuf *mbuf) diff --git a/include/netif.h b/include/netif.h index fe180cdfb..80d8bab25 100644 --- a/include/netif.h +++ b/include/netif.h @@ -312,7 +312,7 @@ 
int netif_ctrl_term(void); /* netif ctrl plane cleanup */ void netif_cfgfile_init(void); void netif_keyword_value_init(void); void install_netif_keywords(void); - +void kni_ingress(struct rte_mbuf *mbuf, struct netif_port *dev); static inline void *netif_priv(struct netif_port *dev) { diff --git a/src/config.mk b/src/config.mk index a87430f11..c9871fc96 100644 --- a/src/config.mk +++ b/src/config.mk @@ -44,6 +44,7 @@ CFLAGS += -D DPVS_MAX_LCORE=64 #CFLAGS += -D CONFIG_NDISC_DEBUG #CFLAGS += -D CONFIG_MSG_DEBUG #CFLAGS += -D CONFIG_DPVS_MP_DEBUG +#CFLAGS += -D CONFIG_ICMP_REDIRECT_CORE ifeq ($(CONFIG_PDUMP), y) CFLAGS += -D CONFIG_DPVS_PDUMP diff --git a/src/icmp.c b/src/icmp.c index 00768fa6b..240311fc8 100644 --- a/src/icmp.c +++ b/src/icmp.c @@ -20,6 +20,11 @@ #include "icmp.h" #include "netinet/in.h" #include "netinet/ip_icmp.h" +#ifdef CONFIG_ICMP_REDIRECT_CORE +#include "netif.h" +#include "scheduler.h" +#include "global_data.h" +#endif #define ICMP #define RTE_LOGTYPE_ICMP RTE_LOGTYPE_USER1 @@ -318,10 +323,113 @@ static struct inet_protocol icmp_protocol = { .handler = icmp_rcv, }; +#ifdef CONFIG_ICMP_REDIRECT_CORE +static struct rte_ring *icmp_redirect_ring; +#define ICMP_RING_SIZE 2048 +lcoreid_t g_icmp_redirect_lcore_id = 0; + +static struct dpvs_lcore_job icmp_redirect = { + .name = "icmp_redirect_proc", + .type = LCORE_JOB_LOOP, + .func = icmp_redirect_proc, + .data = NULL, +}; + +static int icmp_redirect_init(void) +{ + int ret = 0; + int socket_id; + + socket_id = rte_socket_id(); + icmp_redirect_ring = rte_ring_create("icmp_redirect_ring", ICMP_RING_SIZE, socket_id, RING_F_SC_DEQ); + if (icmp_redirect_ring == NULL) { + rte_panic("create ring:icmp_redirect_ring failed!\n"); + return EDPVS_NOMEM; + } + + ret = dpvs_lcore_job_register(&icmp_redirect, LCORE_ROLE_FWD_WORKER); + if (ret < 0) { + rte_ring_free(icmp_redirect_ring); + return ret; + } + + return EDPVS_OK; +} + +int icmp_recv_proc(struct rte_mbuf *mbuf) +{ + int ret = 0; + ret = 
rte_ring_enqueue(icmp_redirect_ring, mbuf); + if (unlikely(-EDQUOT == ret)) { + RTE_LOG(WARNING, ICMP, "%s: icmp ring quota exceeded\n", __func__); + } + else if (ret < 0) { + RTE_LOG(WARNING, ICMP, "%s: icmp ring enqueue failed\n", __func__); + rte_pktmbuf_free(mbuf); + } + + return 0; +} + +void icmp_redirect_proc(void *args) +{ + int ret = 0; + int i = 0; + lcoreid_t cid; + struct rte_mbuf *mbufs[NETIF_MAX_PKT_BURST]; + uint16_t nb_rb = 0; + uint16_t data_off; + + cid = rte_lcore_id(); + if (cid != g_icmp_redirect_lcore_id) + return; + + nb_rb = rte_ring_dequeue_burst(icmp_redirect_ring, (void**)mbufs, NETIF_MAX_PKT_BURST, NULL); + if (nb_rb <= 0) { + return; + } + + for (i = 0; i < nb_rb; i++) { + struct rte_mbuf *mbuf = mbufs[i]; + struct netif_port *dev = netif_port_get(mbuf->port); + + /* Remove ether_hdr at the beginning of an mbuf */ + data_off = mbuf->data_off; + if (unlikely(NULL == rte_pktmbuf_adj(mbuf, sizeof(struct ether_hdr)))) { + rte_pktmbuf_free(mbuf); + return; + } + + ret = INET_HOOK(AF_INET, INET_HOOK_PRE_ROUTING, + mbuf, dev, NULL, ipv4_rcv_fin); + if (ret == EDPVS_KNICONTINUE) { + if (dev->flag & NETIF_PORT_FLAG_FORWARD2KNI) { + rte_pktmbuf_free(mbuf); + return; + } + if (likely(NULL != rte_pktmbuf_prepend(mbuf, + (mbuf->data_off - data_off)))) { + kni_ingress(mbuf, dev); + } else { + rte_pktmbuf_free(mbuf); + } + } + } + + return; +} +#endif + int icmp_init(void) { int err; +#ifdef CONFIG_ICMP_REDIRECT_CORE + err = icmp_redirect_init(); + if (err) + return err; +#endif + err = ipv4_register_protocol(&icmp_protocol, IPPROTO_ICMP); return err; diff --git a/src/ipv4.c b/src/ipv4.c index 2c054b3c7..978b8f770 100644 --- a/src/ipv4.c +++ b/src/ipv4.c @@ -326,7 +326,7 @@ static int ip4_rcv_options(struct rte_mbuf *mbuf) return EDPVS_OK; } -static int ipv4_rcv_fin(struct rte_mbuf *mbuf) +int ipv4_rcv_fin(struct rte_mbuf *mbuf) { int err; struct route_entry *rt = NULL; @@ -378,6 +378,9 @@ static int ipv4_rcv_fin(struct rte_mbuf *mbuf) static int 
ipv4_rcv(struct rte_mbuf *mbuf, struct netif_port *port) { +#ifdef CONFIG_ICMP_REDIRECT_CORE + struct icmphdr *ich, _icmph; +#endif struct ipv4_hdr *iph; uint16_t hlen, len; eth_type_t etype = mbuf->packet_type; /* FIXME: use other field ? */ @@ -430,6 +433,18 @@ static int ipv4_rcv(struct rte_mbuf *mbuf, struct netif_port *port) if (unlikely(iph->next_proto_id == IPPROTO_OSPF)) return EDPVS_KNICONTINUE; +#ifdef CONFIG_ICMP_REDIRECT_CORE + else if (unlikely(iph->next_proto_id == IPPROTO_ICMP)) { + ich = mbuf_header_pointer(mbuf, hlen, sizeof(_icmph), &_icmph); + if (unlikely(!ich)) + goto drop; + if (ich->type == ICMP_ECHOREPLY || ich->type == ICMP_ECHO) { + rte_pktmbuf_prepend(mbuf, (uint16_t)sizeof(struct ether_hdr)); + icmp_recv_proc(mbuf); + return EDPVS_OK; + } + } +#endif return INET_HOOK(AF_INET, INET_HOOK_PRE_ROUTING, mbuf, port, NULL, ipv4_rcv_fin); diff --git a/src/netif.c b/src/netif.c index e839cdcfe..e686b55ef 100644 --- a/src/netif.c +++ b/src/netif.c @@ -43,6 +43,9 @@ #include #include #include +#ifdef CONFIG_ICMP_REDIRECT_CORE +#include "icmp.h" +#endif #define NETIF_PKTPOOL_NB_MBUF_DEF 65535 #define NETIF_PKTPOOL_NB_MBUF_MIN 1023 @@ -144,7 +147,6 @@ static struct list_head port_ntab[NETIF_PORT_TABLE_BUCKETS]; /* hashed by name * #define NETIF_CTRL_BUFFER_LEN 4096 /* function declarations */ -static void kni_ingress(struct rte_mbuf *mbuf, struct netif_port *dev); static void kni_lcore_loop(void *dummy); @@ -709,6 +711,18 @@ static void cpu_id_handler(vector_t tokens) FREE_PTR(str); } +#ifdef CONFIG_ICMP_REDIRECT_CORE +static void cpu_icmp_redirect_handler(vector_t tokens) +{ + struct worker_conf_stream *current_worker = list_entry(worker_list.next, + struct worker_conf_stream, worker_list_node); + + RTE_LOG(INFO, NETIF, "%s(%d) used to redirect icmp packets\n", + current_worker->name, current_worker->cpu_id); + g_icmp_redirect_lcore_id = current_worker->cpu_id; +} +#endif + static void worker_port_handler(vector_t tokens) { 
assert(VECTOR_SIZE(tokens) >= 1); @@ -892,6 +906,9 @@ void install_netif_keywords(void) install_sublevel(); install_keyword("type", worker_type_handler, KW_TYPE_INIT); install_keyword("cpu_id", cpu_id_handler, KW_TYPE_INIT); +#ifdef CONFIG_ICMP_REDIRECT_CORE + install_keyword("icmp_redirect_core", cpu_icmp_redirect_handler, KW_TYPE_INIT); +#endif install_keyword("port", worker_port_handler, KW_TYPE_INIT); install_sublevel(); install_keyword("rx_queue_ids", rx_queue_ids_handler, KW_TYPE_INIT); @@ -2665,7 +2682,7 @@ static inline void free_mbufs(struct rte_mbuf **pkts, unsigned num) } } -static void kni_ingress(struct rte_mbuf *mbuf, struct netif_port *dev) +void kni_ingress(struct rte_mbuf *mbuf, struct netif_port *dev) { if (!kni_dev_exist(dev)) goto freepkt; From 8297573e51defaa0ba32d637bb7b502701a0aa89 Mon Sep 17 00:00:00 2001 From: zhangtengfei-oppo Date: Tue, 23 Feb 2021 20:54:05 +0800 Subject: [PATCH 17/35] fix bug: bond mtu not set; add feature:mtu config option --- conf/dpvs.bond.conf.sample | 4 ++++ conf/dpvs.conf.items | 2 ++ conf/dpvs.conf.sample | 4 ++-- conf/dpvs.conf.single-bond.sample | 2 ++ conf/dpvs.conf.single-nic.sample | 1 + include/netif.h | 4 ++++ src/netif.c | 3 +++ 7 files changed, 18 insertions(+), 2 deletions(-) diff --git a/conf/dpvs.bond.conf.sample b/conf/dpvs.bond.conf.sample index 5465593b6..541c8516b 100644 --- a/conf/dpvs.bond.conf.sample +++ b/conf/dpvs.bond.conf.sample @@ -38,6 +38,7 @@ netif_defs { pballoc 64k status matched } + ! mtu 1500 ! promisc_mode ! kni_name dpdk0.kni } @@ -57,6 +58,7 @@ netif_defs { pballoc 64k status matched } + ! mtu 1500 ! promisc_mode ! kni_name dpdk1.kni } @@ -77,6 +79,7 @@ netif_defs { pballoc 64k status matched } + ! mtu 1500 ! promisc_mode ! kni_name dpdk2.kni } @@ -96,6 +99,7 @@ netif_defs { pballoc 64k status matched } + ! mtu 1500 ! promisc_mode ! 
kni_name dpdk3.kni } diff --git a/conf/dpvs.conf.items b/conf/dpvs.conf.items index 978490960..d9611c165 100644 --- a/conf/dpvs.conf.items +++ b/conf/dpvs.conf.items @@ -40,6 +40,7 @@ netif_defs { pballoc 64k <64k, 64k|128k|256k> status matched } + ! mtu 1500 ! promisc_mode ! kni_name dpdk0.kni } @@ -55,6 +56,7 @@ netif_defs { queue_number 4 descriptor_number 256 } + ! mtu 1500 ! promisc_mode ! kni_name dpdk1.kni } diff --git a/conf/dpvs.conf.sample b/conf/dpvs.conf.sample index 74e2910f4..e375a160c 100644 --- a/conf/dpvs.conf.sample +++ b/conf/dpvs.conf.sample @@ -40,7 +40,7 @@ netif_defs { } ! promisc_mode kni_name dpdk0.kni - mtu 1500 + !mtu 1500 } device dpdk1 { @@ -60,7 +60,7 @@ netif_defs { } ! promisc_mode kni_name dpdk1.kni - mtu 1500 + ! mtu 1500 } ! bonding bond0 { diff --git a/conf/dpvs.conf.single-bond.sample b/conf/dpvs.conf.single-bond.sample index a1c58ffa4..ffb41e499 100644 --- a/conf/dpvs.conf.single-bond.sample +++ b/conf/dpvs.conf.single-bond.sample @@ -39,6 +39,7 @@ netif_defs { } ! promisc_mode ! kni_name dpdk0.kni + ! mtu 1500 } device dpdk2 { @@ -58,6 +59,7 @@ netif_defs { } ! promisc_mode ! kni_name dpdk2.kni + ! mtu 1500 } bonding bond0 { diff --git a/conf/dpvs.conf.single-nic.sample b/conf/dpvs.conf.single-nic.sample index ca8e6a8d6..507ebbd27 100644 --- a/conf/dpvs.conf.single-nic.sample +++ b/conf/dpvs.conf.single-nic.sample @@ -37,6 +37,7 @@ netif_defs { pballoc 64k status matched } + ! mtu 1500 ! 
promisc_mode kni_name dpdk0.kni } diff --git a/include/netif.h b/include/netif.h index fe180cdfb..39c3e6463 100644 --- a/include/netif.h +++ b/include/netif.h @@ -62,6 +62,10 @@ enum { /* maximum number of DPDK rte device */ #define NETIF_MAX_RTE_PORTS 64 +#define NETIF_MAX_ETH_MTU 9000 +#define NETIF_DEFAULT_ETH_MTU 1500 + + #define NETIF_ALIGN 32 #define NETIF_PORT_ID_INVALID 0xFF diff --git a/src/netif.c b/src/netif.c index 22df3aeed..3ae245e2b 100644 --- a/src/netif.c +++ b/src/netif.c @@ -3409,6 +3409,9 @@ static inline void port_mtu_set(struct netif_port *port) mtu = t_mtu; } port->mtu = mtu; + + rte_eth_dev_set_mtu((uint8_t)port->id,port->mtu); + } /* From f5010d1ed81aae8f808535efbc992680e19a4974 Mon Sep 17 00:00:00 2001 From: ZhangTengfei Date: Fri, 26 Feb 2021 19:01:30 +0800 Subject: [PATCH 18/35] coding sytle aliments --- conf/dpvs.bond.conf.sample | 778 +++++++++++++++--------------- conf/dpvs.conf.items | 528 ++++++++++---------- conf/dpvs.conf.sample | 680 +++++++++++++------------- conf/dpvs.conf.single-bond.sample | 576 +++++++++++----------- conf/dpvs.conf.single-nic.sample | 520 ++++++++++---------- src/netif.c | 18 +- 6 files changed, 1550 insertions(+), 1550 deletions(-) diff --git a/conf/dpvs.bond.conf.sample b/conf/dpvs.bond.conf.sample index 541c8516b..e6ed1bdbd 100644 --- a/conf/dpvs.bond.conf.sample +++ b/conf/dpvs.bond.conf.sample @@ -1,389 +1,389 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! This is dpvs default configuration file. -! -! The attribute "" denotes the configuration item at initialization stage. Item of -! this type is configured oneshoot and not reloadable. If invalid value configured in the -! file, dpvs would use its default value. -! -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! 
* inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! global config -global_defs { - log_level WARNING - ! log_file /var/log/dpvs.log - ! log_async_mode off - ! pdump off -} - -! netif config -netif_defs { - pktpool_size 1048575 - pktpool_cache 256 - - device dpdk0 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk0.kni - } - - device dpdk1 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk1.kni - } - - - device dpdk2 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk2.kni - } - - device dpdk3 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk3.kni - } - - bonding bond0 { - mode 0 - slave dpdk0 - slave dpdk1 - primary dpdk0 - kni_name bond0.kni - } - - bonding bond1 { - mode 0 - slave dpdk2 - slave dpdk3 - primary dpdk2 - kni_name bond1.kni - } -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - type master - cpu_id 0 - } - - worker cpu1 { - type slave - cpu_id 1 - port bond0 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port bond0 { - rx_queue_ids 1 - tx_queue_ids 1 - ! 
isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port bond0 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port bond0 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu5 { - type slave - cpu_id 5 - port bond0 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu6 { - type slave - cpu_id 6 - port bond0 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu7 { - type slave - cpu_id 7 - port bond0 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu8 { - type slave - cpu_id 8 - port bond0 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - } - - ! worker cpu9 { - ! type kni - ! cpu_id 9 - ! port bond0 { - ! tx_queue_ids 8 - ! } - ! port bond1 { - ! tx_queue_ids 8 - ! } - !} - -} - -! timer config -timer_defs { - # cpu job loops to schedule dpdk timer management - schedule_interval 500 -} - -! 
dpvs neighbor config -neigh_defs { - unres_queue_length 128 - timeout 60 -} - -! dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 - fragment { - bucket_number 4096 - bucket_entries 16 - max_entries 4096 - ttl 1 - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method hlist - recycle_time 10 - } -} - -! control plane config -ctrl_defs { - lcore_msg { - ring_size 4096 - sync_msg_timeout_us 20000 - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 - conn_pool_cache 256 - conn_init_timeout 3 - ! expire_quiescent_template - ! fast_xmit_close - ! redirect off - } - - udp { - ! defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { - normal 300 - last 3 - } - } - - tcp { - ! defence_tcp_drop - timeout { - none 2 - established 90 - syn_sent 3 - syn_recv 30 - fin_wait 7 - time_wait 7 - close 3 - close_wait 7 - last_ack 7 - listen 120 - synack 30 - last 2 - } - synproxy { - synack_options { - mss 1452 - ttl 63 - sack - ! wscale - ! timestamp - } - ! defer_rs_syn - rs_syn_max_retry 3 - ack_storm_thresh 10 - max_ack_saved 3 - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -! sa_pool config -sa_pool { - pool_hash_size 16 -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! This is dpvs default configuration file. +! +! The attribute "" denotes the configuration item at initialization stage. Item of +! this type is configured oneshoot and not reloadable. If invalid value configured in the +! file, dpvs would use its default value. +! +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! 
global config +global_defs { + log_level WARNING + ! log_file /var/log/dpvs.log + ! log_async_mode off + ! pdump off +} + +! netif config +netif_defs { + pktpool_size 1048575 + pktpool_cache 256 + + device dpdk0 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk0.kni + } + + device dpdk1 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk1.kni + } + + + device dpdk2 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk2.kni + } + + device dpdk3 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk3.kni + } + + bonding bond0 { + mode 0 + slave dpdk0 + slave dpdk1 + primary dpdk0 + kni_name bond0.kni + } + + bonding bond1 { + mode 0 + slave dpdk2 + slave dpdk3 + primary dpdk2 + kni_name bond1.kni + } +} + +! worker config (lcores) +worker_defs { + worker cpu0 { + type master + cpu_id 0 + } + + worker cpu1 { + type slave + cpu_id 1 + port bond0 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port bond0 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! 
isol_rxq_ring_sz 1048576 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port bond0 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port bond0 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu5 { + type slave + cpu_id 5 + port bond0 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu6 { + type slave + cpu_id 6 + port bond0 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu7 { + type slave + cpu_id 7 + port bond0 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu8 { + type slave + cpu_id 8 + port bond0 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + } + + ! worker cpu9 { + ! type kni + ! cpu_id 9 + ! port bond0 { + ! tx_queue_ids 8 + ! } + ! port bond1 { + ! tx_queue_ids 8 + ! } + !} + +} + +! timer config +timer_defs { + # cpu job loops to schedule dpdk timer management + schedule_interval 500 +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 + timeout 60 +} + +! 
dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 + fragment { + bucket_number 4096 + bucket_entries 16 + max_entries 4096 + ttl 1 + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method hlist + recycle_time 10 + } +} + +! control plane config +ctrl_defs { + lcore_msg { + ring_size 4096 + sync_msg_timeout_us 20000 + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 + conn_pool_cache 256 + conn_init_timeout 3 + ! expire_quiescent_template + ! fast_xmit_close + ! redirect off + } + + udp { + ! defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { + normal 300 + last 3 + } + } + + tcp { + ! defence_tcp_drop + timeout { + none 2 + established 90 + syn_sent 3 + syn_recv 30 + fin_wait 7 + time_wait 7 + close 3 + close_wait 7 + last_ack 7 + listen 120 + synack 30 + last 2 + } + synproxy { + synack_options { + mss 1452 + ttl 63 + sack + ! wscale + ! timestamp + } + ! defer_rs_syn + rs_syn_max_retry 3 + ack_storm_thresh 10 + max_ack_saved 3 + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +! sa_pool config +sa_pool { + pool_hash_size 16 +} diff --git a/conf/dpvs.conf.items b/conf/dpvs.conf.items index d9611c165..a27f1c23f 100644 --- a/conf/dpvs.conf.items +++ b/conf/dpvs.conf.items @@ -1,264 +1,264 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! All dpvs configuration items and corresponding attributes are listed in this file. -! The attributes including: -! * item type: | default -! * item default value -! * item value range -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! * inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! 
global config -global_defs { - #daemon - log_level INFO - log_file /var/log/dpvs.log - log_async_mode off - pdump off -} - -! netif config -netif_defs { - pktpool_size 2097151 <65535, 1023-134217728> - pktpool_cache 256 <256, 32-8192> - - device dpdk0 { - rx { - #max_burst_size 32 - queue_number 6 <16, 0-16> - descriptor_number 256 <256, 16-8192> - rss all - } - tx { - queue_number 6 <16, 0-16> - descriptor_number 512 <512, 16-8192> - } - fdir { - filter on - mode perfect - pballoc 64k <64k, 64k|128k|256k> - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk0.kni - } - - device dpdk1 { - rx { - #max_burst_size 32 - queue_number 4 - descriptor_number 128 - rss all - } - tx { - queue_number 4 - descriptor_number 256 - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk1.kni - } - - device bond0 { - mode 4 <0-6> - slave dpdk0 - slave dpdk1 - primary dpdk0 - kni_name bond0.kni - } -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - cpu_id 0 - type master - } - - worker cpu1 { - type slave - cpu_id 1 which cpu the worker thread runs on - port dpdk0 { - rx_queue_ids 0 4 <0, 0-16, space separated list> - tx_queue_ids 0 <0, 0-16, space separated list> - isol_rx_cpu_ids 8 8 - isol_rxq_ring_sz 1048576 <1048576, 1024-2147483648, for all queues> - } - port dpdk1 { - rx_queue_ids 0 - tx_queue_ids 0 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port dpdk0 { - rx_queue_ids 1 5 - tx_queue_ids 1 - } - port dpdk1 { - rx_queue_ids 1 - tx_queue_ids 1 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port dpdk0 { - rx_queue_ids 2 - tx_queue_ids 2 4 5 - isol_rx_cpu_ids 9 [invalid id] 10 - } - port dpdk1 { - rx_queue_ids 2 - tx_queue_ids 2 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port dpdk0 { - rx_queue_ids 3 - tx_queue_ids 3 - } - port dpdk1 { - rx_queue_ids 3 - tx_queue_ids 3 - } - } - - ! kni worker config, optional - ! 
if not configure, kni packets are processed on master lcore - worker cpu5 { - type kni - cpu_id 5 - port dpdk0 { - tx_queue_ids 6 - } - port dpdk1 { - tx_queue_ids 4 - } - } -} - -! timer config -timer_defs { - # time interval(us) to schedule dpdk timer management - schedule_interval 500 <10, 1-10000000> -} - -! dpvs neighbor config -neigh_defs { - unres_queue_length 128 <128, 16-8192> - timeout 60 <60, 1-3600> -} - -! dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 <64, 0-255> - fragment { - bucket_number 4096 <4096, 32-65536> - bucket_entries 16 <16, 1-256> - max_entries 409600 <4096, 32-65536> - ttl 1 <1, 1-255> - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method "hlist" <"hlist"/"lpm"> - recycle_time 10 <10, 1-36000> - lpm { - lpm6_max_rules 1024 <1024, 16-2147483647> - lpm6_num_tbl8s 65536 <65536, 16-2147483647> - rt6_array_size 65536 <65536, 16-2147483647> - rt6_hash_bucket 256 <256, 2-2147483647> - } - } -} - -! control plane config -ctrl_defs { - lcore_msg { - #bucket_number 256 - ring_size 4096 <4096, 256-524288> - sync_msg_timeout_us 2000 <2000, 1-∞> - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 <2097152, 65536-∞> - conn_pool_cache 256 <256, 1-∞> - conn_init_timeout 3 <3, 1-31535999> - expire_quiescent_template - fast_xmit_close - redirect off - } - - udp { - defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { <1-31535999> - normal 300 <300> - last 3 <3> - } - } - - tcp { - defence_tcp_drop - timeout { <1-31535999> - none 3 <2> - established 91 <90> - syn_sent 4 <3> - syn_recv 31 <30> - fin_wait 8 <7> - time_wait 8 <7> - close 4 <3> - close_wait 8 <7> - last_ack 8 <7> - listen 121 <120> - synack 31 <30> - last 3 <2> - } - synproxy { - synack_options { - mss 1452 <1452, 1-65535> - ttl 63 <63, 1-255> - sack - ! wscale - ! 
timestamp - } - !defer_rs_syn - rs_syn_max_retry 3 <3, 1-99> - ack_storm_thresh 10 <10, 1-999> - max_ack_saved 3 <1, 63> - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -sa_pool { - pool_hash_size 16 <16, 1-128> -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! All dpvs configuration items and corresponding attributes are listed in this file. +! The attributes including: +! * item type: | default +! * item default value +! * item value range +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! global config +global_defs { + #daemon + log_level INFO + log_file /var/log/dpvs.log + log_async_mode off + pdump off +} + +! netif config +netif_defs { + pktpool_size 2097151 <65535, 1023-134217728> + pktpool_cache 256 <256, 32-8192> + + device dpdk0 { + rx { + #max_burst_size 32 + queue_number 6 <16, 0-16> + descriptor_number 256 <256, 16-8192> + rss all + } + tx { + queue_number 6 <16, 0-16> + descriptor_number 512 <512, 16-8192> + } + fdir { + filter on + mode perfect + pballoc 64k <64k, 64k|128k|256k> + status matched + } + mtu 1500 <1500,0-9000> + promisc_mode + kni_name dpdk0.kni + } + + device dpdk1 { + rx { + #max_burst_size 32 + queue_number 4 + descriptor_number 128 + rss all + } + tx { + queue_number 4 + descriptor_number 256 + } + mtu 1500 + promisc_mode + kni_name dpdk1.kni + } + + device bond0 { + mode 4 <0-6> + slave dpdk0 + slave dpdk1 + primary dpdk0 + kni_name bond0.kni + } +} + +! 
worker config (lcores) +worker_defs { + worker cpu0 { + cpu_id 0 + type master + } + + worker cpu1 { + type slave + cpu_id 1 which cpu the worker thread runs on + port dpdk0 { + rx_queue_ids 0 4 <0, 0-16, space separated list> + tx_queue_ids 0 <0, 0-16, space separated list> + isol_rx_cpu_ids 8 8 + isol_rxq_ring_sz 1048576 <1048576, 1024-2147483648, for all queues> + } + port dpdk1 { + rx_queue_ids 0 + tx_queue_ids 0 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port dpdk0 { + rx_queue_ids 1 5 + tx_queue_ids 1 + } + port dpdk1 { + rx_queue_ids 1 + tx_queue_ids 1 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port dpdk0 { + rx_queue_ids 2 + tx_queue_ids 2 4 5 + isol_rx_cpu_ids 9 [invalid id] 10 + } + port dpdk1 { + rx_queue_ids 2 + tx_queue_ids 2 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port dpdk0 { + rx_queue_ids 3 + tx_queue_ids 3 + } + port dpdk1 { + rx_queue_ids 3 + tx_queue_ids 3 + } + } + + ! kni worker config, optional + ! if not configure, kni packets are processed on master lcore + worker cpu5 { + type kni + cpu_id 5 + port dpdk0 { + tx_queue_ids 6 + } + port dpdk1 { + tx_queue_ids 4 + } + } +} + +! timer config +timer_defs { + # time interval(us) to schedule dpdk timer management + schedule_interval 500 <10, 1-10000000> +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 <128, 16-8192> + timeout 60 <60, 1-3600> +} + +! dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 <64, 0-255> + fragment { + bucket_number 4096 <4096, 32-65536> + bucket_entries 16 <16, 1-256> + max_entries 409600 <4096, 32-65536> + ttl 1 <1, 1-255> + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method "hlist" <"hlist"/"lpm"> + recycle_time 10 <10, 1-36000> + lpm { + lpm6_max_rules 1024 <1024, 16-2147483647> + lpm6_num_tbl8s 65536 <65536, 16-2147483647> + rt6_array_size 65536 <65536, 16-2147483647> + rt6_hash_bucket 256 <256, 2-2147483647> + } + } +} + +! 
control plane config +ctrl_defs { + lcore_msg { + #bucket_number 256 + ring_size 4096 <4096, 256-524288> + sync_msg_timeout_us 2000 <2000, 1-∞> + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 <2097152, 65536-∞> + conn_pool_cache 256 <256, 1-∞> + conn_init_timeout 3 <3, 1-31535999> + expire_quiescent_template + fast_xmit_close + redirect off + } + + udp { + defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { <1-31535999> + normal 300 <300> + last 3 <3> + } + } + + tcp { + defence_tcp_drop + timeout { <1-31535999> + none 3 <2> + established 91 <90> + syn_sent 4 <3> + syn_recv 31 <30> + fin_wait 8 <7> + time_wait 8 <7> + close 4 <3> + close_wait 8 <7> + last_ack 8 <7> + listen 121 <120> + synack 31 <30> + last 3 <2> + } + synproxy { + synack_options { + mss 1452 <1452, 1-65535> + ttl 63 <63, 1-255> + sack + ! wscale + ! timestamp + } + !defer_rs_syn + rs_syn_max_retry 3 <3, 1-99> + ack_storm_thresh 10 <10, 1-999> + max_ack_saved 3 <1, 63> + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +sa_pool { + pool_hash_size 16 <16, 1-128> +} diff --git a/conf/dpvs.conf.sample b/conf/dpvs.conf.sample index e375a160c..32d3227c9 100644 --- a/conf/dpvs.conf.sample +++ b/conf/dpvs.conf.sample @@ -1,340 +1,340 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! This is dpvs default configuration file. -! -! The attribute "" denotes the configuration item at initialization stage. Item of -! this type is configured oneshoot and not reloadable. If invalid value configured in the -! file, dpvs would use its default value. -! -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! * inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! 
global config -global_defs { - log_level WARNING - ! log_file /var/log/dpvs.log - ! log_async_mode on - ! pdump off -} - -! netif config -netif_defs { - pktpool_size 1048575 - pktpool_cache 256 - - device dpdk0 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! promisc_mode - kni_name dpdk0.kni - !mtu 1500 - } - - device dpdk1 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! promisc_mode - kni_name dpdk1.kni - ! mtu 1500 - } - - ! bonding bond0 { - ! mode 0 - ! slave dpdk0 - ! slave dpdk1 - ! primary dpdk0 - ! kni_name bond0.kni - !} -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - type master - cpu_id 0 - } - - worker cpu1 { - type slave - cpu_id 1 - port dpdk0 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port dpdk0 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port dpdk0 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port dpdk0 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! 
isol_rxq_ring_sz 1048576 - } - } - - worker cpu5 { - type slave - cpu_id 5 - port dpdk0 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu6 { - type slave - cpu_id 6 - port dpdk0 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu7 { - type slave - cpu_id 7 - port dpdk0 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu8 { - type slave - cpu_id 8 - port dpdk0 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - } - - ! worker cpu9 { - ! type kni - ! cpu_id 9 - ! port dpdk0 { - ! tx_queue_ids 8 - ! } - ! port dpdk1 { - ! tx_queue_ids 8 - ! } - !} - -} - -! timer config -timer_defs { - # cpu job loops to schedule dpdk timer management - schedule_interval 500 -} - -! dpvs neighbor config -neigh_defs { - unres_queue_length 128 - timeout 60 -} - -! dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 - fragment { - bucket_number 4096 - bucket_entries 16 - max_entries 4096 - ttl 1 - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method hlist - recycle_time 10 - } -} - -! control plane config -ctrl_defs { - lcore_msg { - ring_size 4096 - sync_msg_timeout_us 20000 - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 - conn_pool_cache 256 - conn_init_timeout 3 - ! expire_quiescent_template - ! fast_xmit_close - ! 
redirect off - } - - udp { - ! defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { - normal 300 - last 3 - } - } - - tcp { - ! defence_tcp_drop - timeout { - none 2 - established 90 - syn_sent 3 - syn_recv 30 - fin_wait 7 - time_wait 7 - close 3 - close_wait 7 - last_ack 7 - listen 120 - synack 30 - last 2 - } - synproxy { - synack_options { - mss 1452 - ttl 63 - sack - ! wscale - ! timestamp - } - ! defer_rs_syn - rs_syn_max_retry 3 - ack_storm_thresh 10 - max_ack_saved 3 - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -! sa_pool config -sa_pool { - pool_hash_size 16 -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! This is dpvs default configuration file. +! +! The attribute "" denotes the configuration item at initialization stage. Item of +! this type is configured oneshoot and not reloadable. If invalid value configured in the +! file, dpvs would use its default value. +! +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! global config +global_defs { + log_level WARNING + ! log_file /var/log/dpvs.log + ! log_async_mode on + ! pdump off +} + +! netif config +netif_defs { + pktpool_size 1048575 + pktpool_cache 256 + + device dpdk0 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + kni_name dpdk0.kni + } + + device dpdk1 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + kni_name dpdk1.kni + } + + ! bonding bond0 { + ! mode 0 + ! 
slave dpdk0 + ! slave dpdk1 + ! primary dpdk0 + ! kni_name bond0.kni + !} +} + +! worker config (lcores) +worker_defs { + worker cpu0 { + type master + cpu_id 0 + } + + worker cpu1 { + type slave + cpu_id 1 + port dpdk0 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port dpdk0 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port dpdk0 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port dpdk0 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu5 { + type slave + cpu_id 5 + port dpdk0 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu6 { + type slave + cpu_id 6 + port dpdk0 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu7 { + type slave + cpu_id 7 + port dpdk0 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! 
isol_rxq_ring_sz 1048576 + } + } + + worker cpu8 { + type slave + cpu_id 8 + port dpdk0 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + } + + ! worker cpu9 { + ! type kni + ! cpu_id 9 + ! port dpdk0 { + ! tx_queue_ids 8 + ! } + ! port dpdk1 { + ! tx_queue_ids 8 + ! } + !} + +} + +! timer config +timer_defs { + # cpu job loops to schedule dpdk timer management + schedule_interval 500 +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 + timeout 60 +} + +! dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 + fragment { + bucket_number 4096 + bucket_entries 16 + max_entries 4096 + ttl 1 + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method hlist + recycle_time 10 + } +} + +! control plane config +ctrl_defs { + lcore_msg { + ring_size 4096 + sync_msg_timeout_us 20000 + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 + conn_pool_cache 256 + conn_init_timeout 3 + ! expire_quiescent_template + ! fast_xmit_close + ! redirect off + } + + udp { + ! defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { + normal 300 + last 3 + } + } + + tcp { + ! defence_tcp_drop + timeout { + none 2 + established 90 + syn_sent 3 + syn_recv 30 + fin_wait 7 + time_wait 7 + close 3 + close_wait 7 + last_ack 7 + listen 120 + synack 30 + last 2 + } + synproxy { + synack_options { + mss 1452 + ttl 63 + sack + ! wscale + ! timestamp + } + ! defer_rs_syn + rs_syn_max_retry 3 + ack_storm_thresh 10 + max_ack_saved 3 + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +! 
sa_pool config +sa_pool { + pool_hash_size 16 +} diff --git a/conf/dpvs.conf.single-bond.sample b/conf/dpvs.conf.single-bond.sample index ffb41e499..2e17be2ec 100644 --- a/conf/dpvs.conf.single-bond.sample +++ b/conf/dpvs.conf.single-bond.sample @@ -1,288 +1,288 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! This is dpvs default configuration file. -! -! The attribute "" denotes the configuration item at initialization stage. Item of -! this type is configured oneshoot and not reloadable. If invalid value configured in the -! file, dpvs would use its default value. -! -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! * inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! global config -global_defs { - log_level WARNING - ! log_file /var/log/dpvs.log - ! log_async_mode on -} - -! netif config -netif_defs { - pktpool_size 524287 - pktpool_cache 256 - - device dpdk0 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! promisc_mode - ! kni_name dpdk0.kni - ! mtu 1500 - } - - device dpdk2 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! promisc_mode - ! kni_name dpdk2.kni - ! mtu 1500 - } - - bonding bond0 { - mode 0 - slave dpdk0 - slave dpdk2 - primary dpdk0 - kni_name bond0.kni - } -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - type master - cpu_id 0 - } - - worker cpu1 { - type slave - cpu_id 1 - port bond0 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! 
isol_rxq_ring_sz 1048576 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port bond0 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port bond0 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port bond0 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu5 { - type slave - cpu_id 5 - port bond0 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu6 { - type slave - cpu_id 6 - port bond0 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu7 { - type slave - cpu_id 7 - port bond0 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu8 { - type slave - cpu_id 8 - port bond0 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - } - - ! worker cpu9 { - ! type kni - ! cpu_id 9 - ! port bond0 { - ! tx_queue_ids 8 - ! } - !} - -} - -! timer config -timer_defs { - # cpu job loops to schedule dpdk timer management - schedule_interval 500 -} - -! dpvs neighbor config -neigh_defs { - unres_queue_length 128 - timeout 60 -} - -! dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 - fragment { - bucket_number 4096 - bucket_entries 16 - max_entries 4096 - ttl 1 - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method hlist - recycle_time 10 - } -} - -! control plane config -ctrl_defs { - lcore_msg { - ring_size 4096 - sync_msg_timeout_us 20000 - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 - conn_pool_cache 256 - conn_init_timeout 3 - ! expire_quiescent_template - ! fast_xmit_close - ! 
redirect off - } - - udp { - ! defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { - normal 300 - last 3 - } - } - - tcp { - ! defence_tcp_drop - timeout { - none 2 - established 90 - syn_sent 3 - syn_recv 30 - fin_wait 7 - time_wait 7 - close 3 - close_wait 7 - last_ack 7 - listen 120 - synack 30 - last 2 - } - synproxy { - synack_options { - mss 1452 - ttl 63 - sack - ! wscale - ! timestamp - } - ! defer_rs_syn - rs_syn_max_retry 3 - ack_storm_thresh 10 - max_ack_saved 3 - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -! sa_pool config -sa_pool { - pool_hash_size 16 -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! This is dpvs default configuration file. +! +! The attribute "" denotes the configuration item at initialization stage. Item of +! this type is configured oneshoot and not reloadable. If invalid value configured in the +! file, dpvs would use its default value. +! +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! global config +global_defs { + log_level WARNING + ! log_file /var/log/dpvs.log + ! log_async_mode on +} + +! netif config +netif_defs { + pktpool_size 524287 + pktpool_cache 256 + + device dpdk0 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk0.kni + } + + device dpdk2 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! 
kni_name dpdk2.kni + } + + bonding bond0 { + mode 0 + slave dpdk0 + slave dpdk2 + primary dpdk0 + kni_name bond0.kni + } +} + +! worker config (lcores) +worker_defs { + worker cpu0 { + type master + cpu_id 0 + } + + worker cpu1 { + type slave + cpu_id 1 + port bond0 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port bond0 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port bond0 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port bond0 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu5 { + type slave + cpu_id 5 + port bond0 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu6 { + type slave + cpu_id 6 + port bond0 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu7 { + type slave + cpu_id 7 + port bond0 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu8 { + type slave + cpu_id 8 + port bond0 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + } + + ! worker cpu9 { + ! type kni + ! cpu_id 9 + ! port bond0 { + ! tx_queue_ids 8 + ! } + !} + +} + +! timer config +timer_defs { + # cpu job loops to schedule dpdk timer management + schedule_interval 500 +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 + timeout 60 +} + +! dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 + fragment { + bucket_number 4096 + bucket_entries 16 + max_entries 4096 + ttl 1 + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method hlist + recycle_time 10 + } +} + +! 
control plane config +ctrl_defs { + lcore_msg { + ring_size 4096 + sync_msg_timeout_us 20000 + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 + conn_pool_cache 256 + conn_init_timeout 3 + ! expire_quiescent_template + ! fast_xmit_close + ! redirect off + } + + udp { + ! defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { + normal 300 + last 3 + } + } + + tcp { + ! defence_tcp_drop + timeout { + none 2 + established 90 + syn_sent 3 + syn_recv 30 + fin_wait 7 + time_wait 7 + close 3 + close_wait 7 + last_ack 7 + listen 120 + synack 30 + last 2 + } + synproxy { + synack_options { + mss 1452 + ttl 63 + sack + ! wscale + ! timestamp + } + ! defer_rs_syn + rs_syn_max_retry 3 + ack_storm_thresh 10 + max_ack_saved 3 + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +! sa_pool config +sa_pool { + pool_hash_size 16 +} diff --git a/conf/dpvs.conf.single-nic.sample b/conf/dpvs.conf.single-nic.sample index 507ebbd27..1ca74d6e8 100644 --- a/conf/dpvs.conf.single-nic.sample +++ b/conf/dpvs.conf.single-nic.sample @@ -1,260 +1,260 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! This is dpvs default configuration file. -! -! The attribute "" denotes the configuration item at initialization stage. Item of -! this type is configured oneshoot and not reloadable. If invalid value configured in the -! file, dpvs would use its default value. -! -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! * inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! global config -global_defs { - log_level WARNING - ! log_file /var/log/dpvs.log - ! log_async_mode on -} - -! 
netif config -netif_defs { - pktpool_size 524287 - pktpool_cache 256 - - device dpdk0 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - kni_name dpdk0.kni - } -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - type master - cpu_id 0 - } - - worker cpu1 { - type slave - cpu_id 1 - port dpdk0 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port dpdk0 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port dpdk0 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port dpdk0 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu5 { - type slave - cpu_id 5 - port dpdk0 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu6 { - type slave - cpu_id 6 - port dpdk0 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu7 { - type slave - cpu_id 7 - port dpdk0 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu8 { - type slave - cpu_id 8 - port dpdk0 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - } - - ! worker cpu9 { - ! type kni - ! cpu_id 9 - ! port dpdk0 { - ! tx_queue_ids 8 - ! } - !} - -} - -! timer config -timer_defs { - # cpu job loops to schedule dpdk timer management - schedule_interval 500 -} - -! dpvs neighbor config -neigh_defs { - unres_queue_length 128 - timeout 60 -} - -! 
dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 - fragment { - bucket_number 4096 - bucket_entries 16 - max_entries 4096 - ttl 1 - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method hlist - recycle_time 10 - } -} - -! control plane config -ctrl_defs { - lcore_msg { - ring_size 4096 - sync_msg_timeout_us 20000 - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 - conn_pool_cache 256 - conn_init_timeout 3 - ! expire_quiescent_template - ! fast_xmit_close - ! redirect off - } - - udp { - ! defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { - normal 300 - last 3 - } - } - - tcp { - ! defence_tcp_drop - timeout { - none 2 - established 90 - syn_sent 3 - syn_recv 30 - fin_wait 7 - time_wait 7 - close 3 - close_wait 7 - last_ack 7 - listen 120 - synack 30 - last 2 - } - synproxy { - synack_options { - mss 1452 - ttl 63 - sack - ! wscale - ! timestamp - } - ! defer_rs_syn - rs_syn_max_retry 3 - ack_storm_thresh 10 - max_ack_saved 3 - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -! sa_pool config -sa_pool { - pool_hash_size 16 -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! This is dpvs default configuration file. +! +! The attribute "" denotes the configuration item at initialization stage. Item of +! this type is configured oneshoot and not reloadable. If invalid value configured in the +! file, dpvs would use its default value. +! +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! global config +global_defs { + log_level WARNING + ! log_file /var/log/dpvs.log + ! log_async_mode on +} + +! 
netif config +netif_defs { + pktpool_size 524287 + pktpool_cache 256 + + device dpdk0 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + kni_name dpdk0.kni + } +} + +! worker config (lcores) +worker_defs { + worker cpu0 { + type master + cpu_id 0 + } + + worker cpu1 { + type slave + cpu_id 1 + port dpdk0 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port dpdk0 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port dpdk0 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port dpdk0 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu5 { + type slave + cpu_id 5 + port dpdk0 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu6 { + type slave + cpu_id 6 + port dpdk0 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu7 { + type slave + cpu_id 7 + port dpdk0 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu8 { + type slave + cpu_id 8 + port dpdk0 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + } + + ! worker cpu9 { + ! type kni + ! cpu_id 9 + ! port dpdk0 { + ! tx_queue_ids 8 + ! } + !} + +} + +! timer config +timer_defs { + # cpu job loops to schedule dpdk timer management + schedule_interval 500 +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 + timeout 60 +} + +! 
dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 + fragment { + bucket_number 4096 + bucket_entries 16 + max_entries 4096 + ttl 1 + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method hlist + recycle_time 10 + } +} + +! control plane config +ctrl_defs { + lcore_msg { + ring_size 4096 + sync_msg_timeout_us 20000 + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 + conn_pool_cache 256 + conn_init_timeout 3 + ! expire_quiescent_template + ! fast_xmit_close + ! redirect off + } + + udp { + ! defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { + normal 300 + last 3 + } + } + + tcp { + ! defence_tcp_drop + timeout { + none 2 + established 90 + syn_sent 3 + syn_recv 30 + fin_wait 7 + time_wait 7 + close 3 + close_wait 7 + last_ack 7 + listen 120 + synack 30 + last 2 + } + synproxy { + synack_options { + mss 1452 + ttl 63 + sack + ! wscale + ! timestamp + } + ! defer_rs_syn + rs_syn_max_retry 3 + ack_storm_thresh 10 + max_ack_saved 3 + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +! 
sa_pool config +sa_pool { + pool_hash_size 16 +} diff --git a/src/netif.c b/src/netif.c index 55d661e40..69b37ac9b 100644 --- a/src/netif.c +++ b/src/netif.c @@ -88,7 +88,7 @@ struct port_conf_stream { int rx_queue_nb; int rx_desc_nb; char rss[32]; - int mtu; + int mtu; int tx_queue_nb; int tx_desc_nb; @@ -269,7 +269,7 @@ static void device_handler(vector_t tokens) port_cfg->tx_queue_nb = -1; port_cfg->rx_desc_nb = NETIF_NB_RX_DESC_DEF; port_cfg->tx_desc_nb = NETIF_NB_TX_DESC_DEF; - port_cfg->mtu = NETIF_DEFAULT_ETH_MTU; + port_cfg->mtu = NETIF_DEFAULT_ETH_MTU; port_cfg->promisc_mode = false; strncpy(port_cfg->rss, "tcp", sizeof(port_cfg->rss)); @@ -514,7 +514,7 @@ static void promisc_mode_handler(vector_t tokens) static void custom_mtu_handler(vector_t tokens) { - char *str = set_value(tokens); + char *str = set_value(tokens); int mtu = 0; struct port_conf_stream *current_device = list_entry(port_list.next, struct port_conf_stream, port_list_node); @@ -902,7 +902,7 @@ void install_netif_keywords(void) install_keyword("filter", fdir_filter_handler, KW_TYPE_INIT); install_sublevel_end(); install_keyword("promisc_mode", promisc_mode_handler, KW_TYPE_INIT); - install_keyword("mtu", custom_mtu_handler,KW_TYPE_INIT); + install_keyword("mtu", custom_mtu_handler,KW_TYPE_INIT); install_keyword("kni_name", kni_name_handler, KW_TYPE_INIT); install_sublevel_end(); install_keyword("bonding", bonding_handler, KW_TYPE_INIT); @@ -3412,7 +3412,7 @@ static inline void port_mtu_set(struct netif_port *port) } port->mtu = mtu; - rte_eth_dev_set_mtu((uint8_t)port->id,port->mtu); + rte_eth_dev_set_mtu((uint8_t)port->id,port->mtu); } @@ -3565,7 +3565,7 @@ static void fill_port_config(struct netif_port *port, char *promisc_on) /* using default configurations */ port->rxq_desc_nb = NETIF_NB_RX_DESC_DEF; port->txq_desc_nb = NETIF_NB_TX_DESC_DEF; - port->mtu = NETIF_DEFAULT_ETH_MTU; + port->mtu = NETIF_DEFAULT_ETH_MTU; } if (port->type == PORT_TYPE_BOND_MASTER) { @@ -3588,11 +3588,11 @@ 
static void fill_port_config(struct netif_port *port, char *promisc_on) if (cfg_stream) { port->rxq_desc_nb = cfg_stream->rx_desc_nb; port->txq_desc_nb = cfg_stream->tx_desc_nb; - port->mtu = cfg_stream->mtu; + port->mtu = cfg_stream->mtu; } else { port->rxq_desc_nb = NETIF_NB_RX_DESC_DEF; port->txq_desc_nb = NETIF_NB_TX_DESC_DEF; - port->mtu = NETIF_DEFAULT_ETH_MTU; + port->mtu = NETIF_DEFAULT_ETH_MTU; } } /* enable promicuous mode if configured */ @@ -3696,7 +3696,7 @@ int netif_port_start(struct netif_port *port) if ((ret = netif_port_fdir_dstport_mask_set(port)) != EDPVS_OK) return ret; if ((ret = rte_eth_dev_set_mtu(port->id,port->mtu)) != EDPVS_OK) - return ret; + return ret; if (port->flag & NETIF_PORT_FLAG_TX_IP_CSUM_OFFLOAD) port->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; From 00025f021fabd51f13f0f7945a696843e87e4f44 Mon Sep 17 00:00:00 2001 From: ZhangTengfei Date: Fri, 26 Feb 2021 19:19:54 +0800 Subject: [PATCH 19/35] coding style alignments --- conf/dpvs.bond.conf.sample | 778 +++++++++++++++--------------- conf/dpvs.conf.items | 528 ++++++++++---------- conf/dpvs.conf.sample | 680 +++++++++++++------------- conf/dpvs.conf.single-bond.sample | 576 +++++++++++----------- conf/dpvs.conf.single-nic.sample | 520 ++++++++++---------- 5 files changed, 1541 insertions(+), 1541 deletions(-) diff --git a/conf/dpvs.bond.conf.sample b/conf/dpvs.bond.conf.sample index e6ed1bdbd..a93679990 100644 --- a/conf/dpvs.bond.conf.sample +++ b/conf/dpvs.bond.conf.sample @@ -1,389 +1,389 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! This is dpvs default configuration file. -! -! The attribute "" denotes the configuration item at initialization stage. Item of -! this type is configured oneshoot and not reloadable. If invalid value configured in the -! file, dpvs would use its default value. -! -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! 
* inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! global config -global_defs { - log_level WARNING - ! log_file /var/log/dpvs.log - ! log_async_mode off - ! pdump off -} - -! netif config -netif_defs { - pktpool_size 1048575 - pktpool_cache 256 - - device dpdk0 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk0.kni - } - - device dpdk1 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk1.kni - } - - - device dpdk2 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk2.kni - } - - device dpdk3 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk3.kni - } - - bonding bond0 { - mode 0 - slave dpdk0 - slave dpdk1 - primary dpdk0 - kni_name bond0.kni - } - - bonding bond1 { - mode 0 - slave dpdk2 - slave dpdk3 - primary dpdk2 - kni_name bond1.kni - } -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - type master - cpu_id 0 - } - - worker cpu1 { - type slave - cpu_id 1 - port bond0 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port bond0 { - rx_queue_ids 1 - tx_queue_ids 1 - ! 
isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port bond0 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port bond0 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu5 { - type slave - cpu_id 5 - port bond0 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu6 { - type slave - cpu_id 6 - port bond0 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu7 { - type slave - cpu_id 7 - port bond0 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu8 { - type slave - cpu_id 8 - port bond0 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - port bond1 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - } - - ! worker cpu9 { - ! type kni - ! cpu_id 9 - ! port bond0 { - ! tx_queue_ids 8 - ! } - ! port bond1 { - ! tx_queue_ids 8 - ! } - !} - -} - -! timer config -timer_defs { - # cpu job loops to schedule dpdk timer management - schedule_interval 500 -} - -! 
dpvs neighbor config -neigh_defs { - unres_queue_length 128 - timeout 60 -} - -! dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 - fragment { - bucket_number 4096 - bucket_entries 16 - max_entries 4096 - ttl 1 - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method hlist - recycle_time 10 - } -} - -! control plane config -ctrl_defs { - lcore_msg { - ring_size 4096 - sync_msg_timeout_us 20000 - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 - conn_pool_cache 256 - conn_init_timeout 3 - ! expire_quiescent_template - ! fast_xmit_close - ! redirect off - } - - udp { - ! defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { - normal 300 - last 3 - } - } - - tcp { - ! defence_tcp_drop - timeout { - none 2 - established 90 - syn_sent 3 - syn_recv 30 - fin_wait 7 - time_wait 7 - close 3 - close_wait 7 - last_ack 7 - listen 120 - synack 30 - last 2 - } - synproxy { - synack_options { - mss 1452 - ttl 63 - sack - ! wscale - ! timestamp - } - ! defer_rs_syn - rs_syn_max_retry 3 - ack_storm_thresh 10 - max_ack_saved 3 - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -! sa_pool config -sa_pool { - pool_hash_size 16 -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! This is dpvs default configuration file. +! +! The attribute "" denotes the configuration item at initialization stage. Item of +! this type is configured oneshoot and not reloadable. If invalid value configured in the +! file, dpvs would use its default value. +! +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! 
global config +global_defs { + log_level WARNING + ! log_file /var/log/dpvs.log + ! log_async_mode off + ! pdump off +} + +! netif config +netif_defs { + pktpool_size 1048575 + pktpool_cache 256 + + device dpdk0 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk0.kni + } + + device dpdk1 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk1.kni + } + + + device dpdk2 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk2.kni + } + + device dpdk3 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk3.kni + } + + bonding bond0 { + mode 0 + slave dpdk0 + slave dpdk1 + primary dpdk0 + kni_name bond0.kni + } + + bonding bond1 { + mode 0 + slave dpdk2 + slave dpdk3 + primary dpdk2 + kni_name bond1.kni + } +} + +! worker config (lcores) +worker_defs { + worker cpu0 { + type master + cpu_id 0 + } + + worker cpu1 { + type slave + cpu_id 1 + port bond0 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port bond0 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! 
isol_rxq_ring_sz 1048576 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port bond0 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port bond0 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu5 { + type slave + cpu_id 5 + port bond0 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu6 { + type slave + cpu_id 6 + port bond0 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu7 { + type slave + cpu_id 7 + port bond0 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu8 { + type slave + cpu_id 8 + port bond0 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + port bond1 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + } + + ! worker cpu9 { + ! type kni + ! cpu_id 9 + ! port bond0 { + ! tx_queue_ids 8 + ! } + ! port bond1 { + ! tx_queue_ids 8 + ! } + !} + +} + +! timer config +timer_defs { + # cpu job loops to schedule dpdk timer management + schedule_interval 500 +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 + timeout 60 +} + +! 
dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 + fragment { + bucket_number 4096 + bucket_entries 16 + max_entries 4096 + ttl 1 + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method hlist + recycle_time 10 + } +} + +! control plane config +ctrl_defs { + lcore_msg { + ring_size 4096 + sync_msg_timeout_us 20000 + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 + conn_pool_cache 256 + conn_init_timeout 3 + ! expire_quiescent_template + ! fast_xmit_close + ! redirect off + } + + udp { + ! defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { + normal 300 + last 3 + } + } + + tcp { + ! defence_tcp_drop + timeout { + none 2 + established 90 + syn_sent 3 + syn_recv 30 + fin_wait 7 + time_wait 7 + close 3 + close_wait 7 + last_ack 7 + listen 120 + synack 30 + last 2 + } + synproxy { + synack_options { + mss 1452 + ttl 63 + sack + ! wscale + ! timestamp + } + ! defer_rs_syn + rs_syn_max_retry 3 + ack_storm_thresh 10 + max_ack_saved 3 + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +! sa_pool config +sa_pool { + pool_hash_size 16 +} diff --git a/conf/dpvs.conf.items b/conf/dpvs.conf.items index a27f1c23f..fee3fb379 100644 --- a/conf/dpvs.conf.items +++ b/conf/dpvs.conf.items @@ -1,264 +1,264 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! All dpvs configuration items and corresponding attributes are listed in this file. -! The attributes including: -! * item type: | default -! * item default value -! * item value range -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! * inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! 
global config -global_defs { - #daemon - log_level INFO - log_file /var/log/dpvs.log - log_async_mode off - pdump off -} - -! netif config -netif_defs { - pktpool_size 2097151 <65535, 1023-134217728> - pktpool_cache 256 <256, 32-8192> - - device dpdk0 { - rx { - #max_burst_size 32 - queue_number 6 <16, 0-16> - descriptor_number 256 <256, 16-8192> - rss all - } - tx { - queue_number 6 <16, 0-16> - descriptor_number 512 <512, 16-8192> - } - fdir { - filter on - mode perfect - pballoc 64k <64k, 64k|128k|256k> - status matched - } - mtu 1500 <1500,0-9000> - promisc_mode - kni_name dpdk0.kni - } - - device dpdk1 { - rx { - #max_burst_size 32 - queue_number 4 - descriptor_number 128 - rss all - } - tx { - queue_number 4 - descriptor_number 256 - } - mtu 1500 - promisc_mode - kni_name dpdk1.kni - } - - device bond0 { - mode 4 <0-6> - slave dpdk0 - slave dpdk1 - primary dpdk0 - kni_name bond0.kni - } -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - cpu_id 0 - type master - } - - worker cpu1 { - type slave - cpu_id 1 which cpu the worker thread runs on - port dpdk0 { - rx_queue_ids 0 4 <0, 0-16, space separated list> - tx_queue_ids 0 <0, 0-16, space separated list> - isol_rx_cpu_ids 8 8 - isol_rxq_ring_sz 1048576 <1048576, 1024-2147483648, for all queues> - } - port dpdk1 { - rx_queue_ids 0 - tx_queue_ids 0 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port dpdk0 { - rx_queue_ids 1 5 - tx_queue_ids 1 - } - port dpdk1 { - rx_queue_ids 1 - tx_queue_ids 1 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port dpdk0 { - rx_queue_ids 2 - tx_queue_ids 2 4 5 - isol_rx_cpu_ids 9 [invalid id] 10 - } - port dpdk1 { - rx_queue_ids 2 - tx_queue_ids 2 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port dpdk0 { - rx_queue_ids 3 - tx_queue_ids 3 - } - port dpdk1 { - rx_queue_ids 3 - tx_queue_ids 3 - } - } - - ! kni worker config, optional - ! 
if not configure, kni packets are processed on master lcore - worker cpu5 { - type kni - cpu_id 5 - port dpdk0 { - tx_queue_ids 6 - } - port dpdk1 { - tx_queue_ids 4 - } - } -} - -! timer config -timer_defs { - # time interval(us) to schedule dpdk timer management - schedule_interval 500 <10, 1-10000000> -} - -! dpvs neighbor config -neigh_defs { - unres_queue_length 128 <128, 16-8192> - timeout 60 <60, 1-3600> -} - -! dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 <64, 0-255> - fragment { - bucket_number 4096 <4096, 32-65536> - bucket_entries 16 <16, 1-256> - max_entries 409600 <4096, 32-65536> - ttl 1 <1, 1-255> - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method "hlist" <"hlist"/"lpm"> - recycle_time 10 <10, 1-36000> - lpm { - lpm6_max_rules 1024 <1024, 16-2147483647> - lpm6_num_tbl8s 65536 <65536, 16-2147483647> - rt6_array_size 65536 <65536, 16-2147483647> - rt6_hash_bucket 256 <256, 2-2147483647> - } - } -} - -! control plane config -ctrl_defs { - lcore_msg { - #bucket_number 256 - ring_size 4096 <4096, 256-524288> - sync_msg_timeout_us 2000 <2000, 1-∞> - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 <2097152, 65536-∞> - conn_pool_cache 256 <256, 1-∞> - conn_init_timeout 3 <3, 1-31535999> - expire_quiescent_template - fast_xmit_close - redirect off - } - - udp { - defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { <1-31535999> - normal 300 <300> - last 3 <3> - } - } - - tcp { - defence_tcp_drop - timeout { <1-31535999> - none 3 <2> - established 91 <90> - syn_sent 4 <3> - syn_recv 31 <30> - fin_wait 8 <7> - time_wait 8 <7> - close 4 <3> - close_wait 8 <7> - last_ack 8 <7> - listen 121 <120> - synack 31 <30> - last 3 <2> - } - synproxy { - synack_options { - mss 1452 <1452, 1-65535> - ttl 63 <63, 1-255> - sack - ! wscale - ! 
timestamp - } - !defer_rs_syn - rs_syn_max_retry 3 <3, 1-99> - ack_storm_thresh 10 <10, 1-999> - max_ack_saved 3 <1, 63> - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -sa_pool { - pool_hash_size 16 <16, 1-128> -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! All dpvs configuration items and corresponding attributes are listed in this file. +! The attributes including: +! * item type: | default +! * item default value +! * item value range +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! global config +global_defs { + #daemon + log_level INFO + log_file /var/log/dpvs.log + log_async_mode off + pdump off +} + +! netif config +netif_defs { + pktpool_size 2097151 <65535, 1023-134217728> + pktpool_cache 256 <256, 32-8192> + + device dpdk0 { + rx { + #max_burst_size 32 + queue_number 6 <16, 0-16> + descriptor_number 256 <256, 16-8192> + rss all + } + tx { + queue_number 6 <16, 0-16> + descriptor_number 512 <512, 16-8192> + } + fdir { + filter on + mode perfect + pballoc 64k <64k, 64k|128k|256k> + status matched + } + ! mtu 1500 <1500,0-9000> + ! promisc_mode + ! kni_name dpdk0.kni + } + + device dpdk1 { + rx { + #max_burst_size 32 + queue_number 4 + descriptor_number 128 + rss all + } + tx { + queue_number 4 + descriptor_number 256 + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk1.kni + } + + device bond0 { + mode 4 <0-6> + slave dpdk0 + slave dpdk1 + primary dpdk0 + kni_name bond0.kni + } +} + +! 
worker config (lcores) +worker_defs { + worker cpu0 { + cpu_id 0 + type master + } + + worker cpu1 { + type slave + cpu_id 1 which cpu the worker thread runs on + port dpdk0 { + rx_queue_ids 0 4 <0, 0-16, space separated list> + tx_queue_ids 0 <0, 0-16, space separated list> + isol_rx_cpu_ids 8 8 + isol_rxq_ring_sz 1048576 <1048576, 1024-2147483648, for all queues> + } + port dpdk1 { + rx_queue_ids 0 + tx_queue_ids 0 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port dpdk0 { + rx_queue_ids 1 5 + tx_queue_ids 1 + } + port dpdk1 { + rx_queue_ids 1 + tx_queue_ids 1 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port dpdk0 { + rx_queue_ids 2 + tx_queue_ids 2 4 5 + isol_rx_cpu_ids 9 [invalid id] 10 + } + port dpdk1 { + rx_queue_ids 2 + tx_queue_ids 2 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port dpdk0 { + rx_queue_ids 3 + tx_queue_ids 3 + } + port dpdk1 { + rx_queue_ids 3 + tx_queue_ids 3 + } + } + + ! kni worker config, optional + ! if not configure, kni packets are processed on master lcore + worker cpu5 { + type kni + cpu_id 5 + port dpdk0 { + tx_queue_ids 6 + } + port dpdk1 { + tx_queue_ids 4 + } + } +} + +! timer config +timer_defs { + # time interval(us) to schedule dpdk timer management + schedule_interval 500 <10, 1-10000000> +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 <128, 16-8192> + timeout 60 <60, 1-3600> +} + +! dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 <64, 0-255> + fragment { + bucket_number 4096 <4096, 32-65536> + bucket_entries 16 <16, 1-256> + max_entries 409600 <4096, 32-65536> + ttl 1 <1, 1-255> + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method "hlist" <"hlist"/"lpm"> + recycle_time 10 <10, 1-36000> + lpm { + lpm6_max_rules 1024 <1024, 16-2147483647> + lpm6_num_tbl8s 65536 <65536, 16-2147483647> + rt6_array_size 65536 <65536, 16-2147483647> + rt6_hash_bucket 256 <256, 2-2147483647> + } + } +} + +! 
control plane config +ctrl_defs { + lcore_msg { + #bucket_number 256 + ring_size 4096 <4096, 256-524288> + sync_msg_timeout_us 2000 <2000, 1-∞> + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 <2097152, 65536-∞> + conn_pool_cache 256 <256, 1-∞> + conn_init_timeout 3 <3, 1-31535999> + expire_quiescent_template + fast_xmit_close + redirect off + } + + udp { + defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { <1-31535999> + normal 300 <300> + last 3 <3> + } + } + + tcp { + defence_tcp_drop + timeout { <1-31535999> + none 3 <2> + established 91 <90> + syn_sent 4 <3> + syn_recv 31 <30> + fin_wait 8 <7> + time_wait 8 <7> + close 4 <3> + close_wait 8 <7> + last_ack 8 <7> + listen 121 <120> + synack 31 <30> + last 3 <2> + } + synproxy { + synack_options { + mss 1452 <1452, 1-65535> + ttl 63 <63, 1-255> + sack + ! wscale + ! timestamp + } + !defer_rs_syn + rs_syn_max_retry 3 <3, 1-99> + ack_storm_thresh 10 <10, 1-999> + max_ack_saved 3 <1, 63> + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +sa_pool { + pool_hash_size 16 <16, 1-128> +} diff --git a/conf/dpvs.conf.sample b/conf/dpvs.conf.sample index 32d3227c9..28765edf3 100644 --- a/conf/dpvs.conf.sample +++ b/conf/dpvs.conf.sample @@ -1,340 +1,340 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! This is dpvs default configuration file. -! -! The attribute "" denotes the configuration item at initialization stage. Item of -! this type is configured oneshoot and not reloadable. If invalid value configured in the -! file, dpvs would use its default value. -! -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! * inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! 
global config -global_defs { - log_level WARNING - ! log_file /var/log/dpvs.log - ! log_async_mode on - ! pdump off -} - -! netif config -netif_defs { - pktpool_size 1048575 - pktpool_cache 256 - - device dpdk0 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - kni_name dpdk0.kni - } - - device dpdk1 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - kni_name dpdk1.kni - } - - ! bonding bond0 { - ! mode 0 - ! slave dpdk0 - ! slave dpdk1 - ! primary dpdk0 - ! kni_name bond0.kni - !} -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - type master - cpu_id 0 - } - - worker cpu1 { - type slave - cpu_id 1 - port dpdk0 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port dpdk0 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port dpdk0 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port dpdk0 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! 
isol_rxq_ring_sz 1048576 - } - } - - worker cpu5 { - type slave - cpu_id 5 - port dpdk0 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu6 { - type slave - cpu_id 6 - port dpdk0 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu7 { - type slave - cpu_id 7 - port dpdk0 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu8 { - type slave - cpu_id 8 - port dpdk0 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - port dpdk1 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - } - - ! worker cpu9 { - ! type kni - ! cpu_id 9 - ! port dpdk0 { - ! tx_queue_ids 8 - ! } - ! port dpdk1 { - ! tx_queue_ids 8 - ! } - !} - -} - -! timer config -timer_defs { - # cpu job loops to schedule dpdk timer management - schedule_interval 500 -} - -! dpvs neighbor config -neigh_defs { - unres_queue_length 128 - timeout 60 -} - -! dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 - fragment { - bucket_number 4096 - bucket_entries 16 - max_entries 4096 - ttl 1 - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method hlist - recycle_time 10 - } -} - -! control plane config -ctrl_defs { - lcore_msg { - ring_size 4096 - sync_msg_timeout_us 20000 - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 - conn_pool_cache 256 - conn_init_timeout 3 - ! expire_quiescent_template - ! fast_xmit_close - ! 
redirect off - } - - udp { - ! defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { - normal 300 - last 3 - } - } - - tcp { - ! defence_tcp_drop - timeout { - none 2 - established 90 - syn_sent 3 - syn_recv 30 - fin_wait 7 - time_wait 7 - close 3 - close_wait 7 - last_ack 7 - listen 120 - synack 30 - last 2 - } - synproxy { - synack_options { - mss 1452 - ttl 63 - sack - ! wscale - ! timestamp - } - ! defer_rs_syn - rs_syn_max_retry 3 - ack_storm_thresh 10 - max_ack_saved 3 - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -! sa_pool config -sa_pool { - pool_hash_size 16 -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! This is dpvs default configuration file. +! +! The attribute "" denotes the configuration item at initialization stage. Item of +! this type is configured oneshoot and not reloadable. If invalid value configured in the +! file, dpvs would use its default value. +! +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! global config +global_defs { + log_level WARNING + ! log_file /var/log/dpvs.log + ! log_async_mode on + ! pdump off +} + +! netif config +netif_defs { + pktpool_size 1048575 + pktpool_cache 256 + + device dpdk0 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + kni_name dpdk0.kni + } + + device dpdk1 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + kni_name dpdk1.kni + } + + ! bonding bond0 { + ! mode 0 + ! 
slave dpdk0 + ! slave dpdk1 + ! primary dpdk0 + ! kni_name bond0.kni + !} +} + +! worker config (lcores) +worker_defs { + worker cpu0 { + type master + cpu_id 0 + } + + worker cpu1 { + type slave + cpu_id 1 + port dpdk0 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port dpdk0 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port dpdk0 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port dpdk0 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu5 { + type slave + cpu_id 5 + port dpdk0 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu6 { + type slave + cpu_id 6 + port dpdk0 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu7 { + type slave + cpu_id 7 + port dpdk0 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! 
isol_rxq_ring_sz 1048576 + } + } + + worker cpu8 { + type slave + cpu_id 8 + port dpdk0 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + port dpdk1 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + } + + ! worker cpu9 { + ! type kni + ! cpu_id 9 + ! port dpdk0 { + ! tx_queue_ids 8 + ! } + ! port dpdk1 { + ! tx_queue_ids 8 + ! } + !} + +} + +! timer config +timer_defs { + # cpu job loops to schedule dpdk timer management + schedule_interval 500 +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 + timeout 60 +} + +! dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 + fragment { + bucket_number 4096 + bucket_entries 16 + max_entries 4096 + ttl 1 + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method hlist + recycle_time 10 + } +} + +! control plane config +ctrl_defs { + lcore_msg { + ring_size 4096 + sync_msg_timeout_us 20000 + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 + conn_pool_cache 256 + conn_init_timeout 3 + ! expire_quiescent_template + ! fast_xmit_close + ! redirect off + } + + udp { + ! defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { + normal 300 + last 3 + } + } + + tcp { + ! defence_tcp_drop + timeout { + none 2 + established 90 + syn_sent 3 + syn_recv 30 + fin_wait 7 + time_wait 7 + close 3 + close_wait 7 + last_ack 7 + listen 120 + synack 30 + last 2 + } + synproxy { + synack_options { + mss 1452 + ttl 63 + sack + ! wscale + ! timestamp + } + ! defer_rs_syn + rs_syn_max_retry 3 + ack_storm_thresh 10 + max_ack_saved 3 + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +! 
sa_pool config +sa_pool { + pool_hash_size 16 +} diff --git a/conf/dpvs.conf.single-bond.sample b/conf/dpvs.conf.single-bond.sample index 2e17be2ec..b7df4a427 100644 --- a/conf/dpvs.conf.single-bond.sample +++ b/conf/dpvs.conf.single-bond.sample @@ -1,288 +1,288 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! This is dpvs default configuration file. -! -! The attribute "" denotes the configuration item at initialization stage. Item of -! this type is configured oneshoot and not reloadable. If invalid value configured in the -! file, dpvs would use its default value. -! -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! * inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! global config -global_defs { - log_level WARNING - ! log_file /var/log/dpvs.log - ! log_async_mode on -} - -! netif config -netif_defs { - pktpool_size 524287 - pktpool_cache 256 - - device dpdk0 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk0.kni - } - - device dpdk2 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - ! kni_name dpdk2.kni - } - - bonding bond0 { - mode 0 - slave dpdk0 - slave dpdk2 - primary dpdk0 - kni_name bond0.kni - } -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - type master - cpu_id 0 - } - - worker cpu1 { - type slave - cpu_id 1 - port bond0 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! 
isol_rxq_ring_sz 1048576 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port bond0 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port bond0 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port bond0 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu5 { - type slave - cpu_id 5 - port bond0 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu6 { - type slave - cpu_id 6 - port bond0 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu7 { - type slave - cpu_id 7 - port bond0 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu8 { - type slave - cpu_id 8 - port bond0 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - } - - ! worker cpu9 { - ! type kni - ! cpu_id 9 - ! port bond0 { - ! tx_queue_ids 8 - ! } - !} - -} - -! timer config -timer_defs { - # cpu job loops to schedule dpdk timer management - schedule_interval 500 -} - -! dpvs neighbor config -neigh_defs { - unres_queue_length 128 - timeout 60 -} - -! dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 - fragment { - bucket_number 4096 - bucket_entries 16 - max_entries 4096 - ttl 1 - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method hlist - recycle_time 10 - } -} - -! control plane config -ctrl_defs { - lcore_msg { - ring_size 4096 - sync_msg_timeout_us 20000 - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 - conn_pool_cache 256 - conn_init_timeout 3 - ! expire_quiescent_template - ! fast_xmit_close - ! 
redirect off - } - - udp { - ! defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { - normal 300 - last 3 - } - } - - tcp { - ! defence_tcp_drop - timeout { - none 2 - established 90 - syn_sent 3 - syn_recv 30 - fin_wait 7 - time_wait 7 - close 3 - close_wait 7 - last_ack 7 - listen 120 - synack 30 - last 2 - } - synproxy { - synack_options { - mss 1452 - ttl 63 - sack - ! wscale - ! timestamp - } - ! defer_rs_syn - rs_syn_max_retry 3 - ack_storm_thresh 10 - max_ack_saved 3 - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -! sa_pool config -sa_pool { - pool_hash_size 16 -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! This is dpvs default configuration file. +! +! The attribute "" denotes the configuration item at initialization stage. Item of +! this type is configured oneshoot and not reloadable. If invalid value configured in the +! file, dpvs would use its default value. +! +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! global config +global_defs { + log_level WARNING + ! log_file /var/log/dpvs.log + ! log_async_mode on +} + +! netif config +netif_defs { + pktpool_size 524287 + pktpool_cache 256 + + device dpdk0 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! kni_name dpdk0.kni + } + + device dpdk2 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + ! 
kni_name dpdk2.kni + } + + bonding bond0 { + mode 0 + slave dpdk0 + slave dpdk2 + primary dpdk0 + kni_name bond0.kni + } +} + +! worker config (lcores) +worker_defs { + worker cpu0 { + type master + cpu_id 0 + } + + worker cpu1 { + type slave + cpu_id 1 + port bond0 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port bond0 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port bond0 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port bond0 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu5 { + type slave + cpu_id 5 + port bond0 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu6 { + type slave + cpu_id 6 + port bond0 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu7 { + type slave + cpu_id 7 + port bond0 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu8 { + type slave + cpu_id 8 + port bond0 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + } + + ! worker cpu9 { + ! type kni + ! cpu_id 9 + ! port bond0 { + ! tx_queue_ids 8 + ! } + !} + +} + +! timer config +timer_defs { + # cpu job loops to schedule dpdk timer management + schedule_interval 500 +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 + timeout 60 +} + +! dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 + fragment { + bucket_number 4096 + bucket_entries 16 + max_entries 4096 + ttl 1 + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method hlist + recycle_time 10 + } +} + +! 
control plane config +ctrl_defs { + lcore_msg { + ring_size 4096 + sync_msg_timeout_us 20000 + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 + conn_pool_cache 256 + conn_init_timeout 3 + ! expire_quiescent_template + ! fast_xmit_close + ! redirect off + } + + udp { + ! defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { + normal 300 + last 3 + } + } + + tcp { + ! defence_tcp_drop + timeout { + none 2 + established 90 + syn_sent 3 + syn_recv 30 + fin_wait 7 + time_wait 7 + close 3 + close_wait 7 + last_ack 7 + listen 120 + synack 30 + last 2 + } + synproxy { + synack_options { + mss 1452 + ttl 63 + sack + ! wscale + ! timestamp + } + ! defer_rs_syn + rs_syn_max_retry 3 + ack_storm_thresh 10 + max_ack_saved 3 + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +! sa_pool config +sa_pool { + pool_hash_size 16 +} diff --git a/conf/dpvs.conf.single-nic.sample b/conf/dpvs.conf.single-nic.sample index 1ca74d6e8..1e0b105ca 100644 --- a/conf/dpvs.conf.single-nic.sample +++ b/conf/dpvs.conf.single-nic.sample @@ -1,260 +1,260 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! This is dpvs default configuration file. -! -! The attribute "" denotes the configuration item at initialization stage. Item of -! this type is configured oneshoot and not reloadable. If invalid value configured in the -! file, dpvs would use its default value. -! -! Note that dpvs configuration file supports the following comment type: -! * line comment: using '#" or '!' -! * inline range comment: using '<' and '>', put comment in between -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -! global config -global_defs { - log_level WARNING - ! log_file /var/log/dpvs.log - ! log_async_mode on -} - -! 
netif config -netif_defs { - pktpool_size 524287 - pktpool_cache 256 - - device dpdk0 { - rx { - queue_number 8 - descriptor_number 1024 - rss all - } - tx { - queue_number 8 - descriptor_number 1024 - } - fdir { - mode perfect - pballoc 64k - status matched - } - ! mtu 1500 - ! promisc_mode - kni_name dpdk0.kni - } -} - -! worker config (lcores) -worker_defs { - worker cpu0 { - type master - cpu_id 0 - } - - worker cpu1 { - type slave - cpu_id 1 - port dpdk0 { - rx_queue_ids 0 - tx_queue_ids 0 - ! isol_rx_cpu_ids 9 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu2 { - type slave - cpu_id 2 - port dpdk0 { - rx_queue_ids 1 - tx_queue_ids 1 - ! isol_rx_cpu_ids 10 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu3 { - type slave - cpu_id 3 - port dpdk0 { - rx_queue_ids 2 - tx_queue_ids 2 - ! isol_rx_cpu_ids 11 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu4 { - type slave - cpu_id 4 - port dpdk0 { - rx_queue_ids 3 - tx_queue_ids 3 - ! isol_rx_cpu_ids 12 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu5 { - type slave - cpu_id 5 - port dpdk0 { - rx_queue_ids 4 - tx_queue_ids 4 - ! isol_rx_cpu_ids 13 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu6 { - type slave - cpu_id 6 - port dpdk0 { - rx_queue_ids 5 - tx_queue_ids 5 - ! isol_rx_cpu_ids 14 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu7 { - type slave - cpu_id 7 - port dpdk0 { - rx_queue_ids 6 - tx_queue_ids 6 - ! isol_rx_cpu_ids 15 - ! isol_rxq_ring_sz 1048576 - } - } - - worker cpu8 { - type slave - cpu_id 8 - port dpdk0 { - rx_queue_ids 7 - tx_queue_ids 7 - ! isol_rx_cpu_ids 16 - ! isol_rxq_ring_sz 1048576 - } - } - - ! worker cpu9 { - ! type kni - ! cpu_id 9 - ! port dpdk0 { - ! tx_queue_ids 8 - ! } - !} - -} - -! timer config -timer_defs { - # cpu job loops to schedule dpdk timer management - schedule_interval 500 -} - -! dpvs neighbor config -neigh_defs { - unres_queue_length 128 - timeout 60 -} - -! 
dpvs ipv4 config -ipv4_defs { - forwarding off - default_ttl 64 - fragment { - bucket_number 4096 - bucket_entries 16 - max_entries 4096 - ttl 1 - } -} - -! dpvs ipv6 config -ipv6_defs { - disable off - forwarding off - route6 { - method hlist - recycle_time 10 - } -} - -! control plane config -ctrl_defs { - lcore_msg { - ring_size 4096 - sync_msg_timeout_us 20000 - priority_level low - } - ipc_msg { - unix_domain /var/run/dpvs_ctrl - } -} - -! ipvs config -ipvs_defs { - conn { - conn_pool_size 2097152 - conn_pool_cache 256 - conn_init_timeout 3 - ! expire_quiescent_template - ! fast_xmit_close - ! redirect off - } - - udp { - ! defence_udp_drop - uoa_mode opp - uoa_max_trail 3 - timeout { - normal 300 - last 3 - } - } - - tcp { - ! defence_tcp_drop - timeout { - none 2 - established 90 - syn_sent 3 - syn_recv 30 - fin_wait 7 - time_wait 7 - close 3 - close_wait 7 - last_ack 7 - listen 120 - synack 30 - last 2 - } - synproxy { - synack_options { - mss 1452 - ttl 63 - sack - ! wscale - ! timestamp - } - ! defer_rs_syn - rs_syn_max_retry 3 - ack_storm_thresh 10 - max_ack_saved 3 - conn_reuse_state { - close - time_wait - ! fin_wait - ! close_wait - ! last_ack - } - } - } -} - -! sa_pool config -sa_pool { - pool_hash_size 16 -} +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! This is dpvs default configuration file. +! +! The attribute "" denotes the configuration item at initialization stage. Item of +! this type is configured oneshoot and not reloadable. If invalid value configured in the +! file, dpvs would use its default value. +! +! Note that dpvs configuration file supports the following comment type: +! * line comment: using '#" or '!' +! * inline range comment: using '<' and '>', put comment in between +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! global config +global_defs { + log_level WARNING + ! log_file /var/log/dpvs.log + ! log_async_mode on +} + +! 
netif config +netif_defs { + pktpool_size 524287 + pktpool_cache 256 + + device dpdk0 { + rx { + queue_number 8 + descriptor_number 1024 + rss all + } + tx { + queue_number 8 + descriptor_number 1024 + } + fdir { + mode perfect + pballoc 64k + status matched + } + ! mtu 1500 + ! promisc_mode + kni_name dpdk0.kni + } +} + +! worker config (lcores) +worker_defs { + worker cpu0 { + type master + cpu_id 0 + } + + worker cpu1 { + type slave + cpu_id 1 + port dpdk0 { + rx_queue_ids 0 + tx_queue_ids 0 + ! isol_rx_cpu_ids 9 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu2 { + type slave + cpu_id 2 + port dpdk0 { + rx_queue_ids 1 + tx_queue_ids 1 + ! isol_rx_cpu_ids 10 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu3 { + type slave + cpu_id 3 + port dpdk0 { + rx_queue_ids 2 + tx_queue_ids 2 + ! isol_rx_cpu_ids 11 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu4 { + type slave + cpu_id 4 + port dpdk0 { + rx_queue_ids 3 + tx_queue_ids 3 + ! isol_rx_cpu_ids 12 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu5 { + type slave + cpu_id 5 + port dpdk0 { + rx_queue_ids 4 + tx_queue_ids 4 + ! isol_rx_cpu_ids 13 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu6 { + type slave + cpu_id 6 + port dpdk0 { + rx_queue_ids 5 + tx_queue_ids 5 + ! isol_rx_cpu_ids 14 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu7 { + type slave + cpu_id 7 + port dpdk0 { + rx_queue_ids 6 + tx_queue_ids 6 + ! isol_rx_cpu_ids 15 + ! isol_rxq_ring_sz 1048576 + } + } + + worker cpu8 { + type slave + cpu_id 8 + port dpdk0 { + rx_queue_ids 7 + tx_queue_ids 7 + ! isol_rx_cpu_ids 16 + ! isol_rxq_ring_sz 1048576 + } + } + + ! worker cpu9 { + ! type kni + ! cpu_id 9 + ! port dpdk0 { + ! tx_queue_ids 8 + ! } + !} + +} + +! timer config +timer_defs { + # cpu job loops to schedule dpdk timer management + schedule_interval 500 +} + +! dpvs neighbor config +neigh_defs { + unres_queue_length 128 + timeout 60 +} + +! 
dpvs ipv4 config +ipv4_defs { + forwarding off + default_ttl 64 + fragment { + bucket_number 4096 + bucket_entries 16 + max_entries 4096 + ttl 1 + } +} + +! dpvs ipv6 config +ipv6_defs { + disable off + forwarding off + route6 { + method hlist + recycle_time 10 + } +} + +! control plane config +ctrl_defs { + lcore_msg { + ring_size 4096 + sync_msg_timeout_us 20000 + priority_level low + } + ipc_msg { + unix_domain /var/run/dpvs_ctrl + } +} + +! ipvs config +ipvs_defs { + conn { + conn_pool_size 2097152 + conn_pool_cache 256 + conn_init_timeout 3 + ! expire_quiescent_template + ! fast_xmit_close + ! redirect off + } + + udp { + ! defence_udp_drop + uoa_mode opp + uoa_max_trail 3 + timeout { + normal 300 + last 3 + } + } + + tcp { + ! defence_tcp_drop + timeout { + none 2 + established 90 + syn_sent 3 + syn_recv 30 + fin_wait 7 + time_wait 7 + close 3 + close_wait 7 + last_ack 7 + listen 120 + synack 30 + last 2 + } + synproxy { + synack_options { + mss 1452 + ttl 63 + sack + ! wscale + ! timestamp + } + ! defer_rs_syn + rs_syn_max_retry 3 + ack_storm_thresh 10 + max_ack_saved 3 + conn_reuse_state { + close + time_wait + ! fin_wait + ! close_wait + ! last_ack + } + } + } +} + +! 
sa_pool config +sa_pool { + pool_hash_size 16 +} From fcda8064b2b60ec69d19a61a50c8b05af19513a2 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Mon, 8 Mar 2021 10:37:02 +0800 Subject: [PATCH 20/35] tc: per-lcore tuning --- include/conf/tc.h | 22 +- include/ctrl.h | 8 +- include/global_data.h | 6 + include/linux_ipv6.h | 12 + include/netif.h | 4 +- include/tc/cls.h | 3 +- include/tc/sch.h | 114 +++++---- include/tc/tc.h | 11 +- src/global_data.c | 8 + src/ipv4.c | 3 - src/ipvs/ip_vs_conn.c | 4 +- src/main.c | 8 +- src/netif.c | 13 +- src/tc/cls.c | 36 +-- src/tc/cls_match.c | 88 +++++-- src/tc/sch_fifo.c | 35 +-- src/tc/sch_generic.c | 216 +++++----------- src/tc/sch_pfifo_fast.c | 102 +++++--- src/tc/sch_shm.c | 161 ++++++++++++ src/tc/sch_tbf.c | 121 +++++---- src/tc/tc.c | 243 ++++++++---------- src/tc/tc_ctrl.c | 554 ++++++++++++++++++++++++++++++++-------- tools/dpip/cls.c | 24 +- tools/dpip/ipset.c | 0 tools/dpip/qsch.c | 58 +++-- tools/dpip/route.c | 0 26 files changed, 1160 insertions(+), 694 deletions(-) create mode 100644 src/tc/sch_shm.c mode change 100755 => 100644 tools/dpip/ipset.c mode change 100755 => 100644 tools/dpip/route.c diff --git a/include/conf/tc.h b/include/conf/tc.h index b3cc4ebb8..d9a4981fd 100644 --- a/include/conf/tc.h +++ b/include/conf/tc.h @@ -30,6 +30,9 @@ #include "tc/sch.h" #include "tc/cls.h" +#define TC_F_OPS_STATS 0x0001 +#define TC_F_OPS_VERBOSE 0x0002 + typedef enum { TC_OBJ_QSCH, TC_OBJ_CLS, @@ -39,6 +42,7 @@ typedef enum { * scheduler section */ struct tc_qsch_param { + lcoreid_t cid; tc_handle_t handle; tc_handle_t where; /* TC_H_ROOT | TC_H_INGRESS | parent */ char kind[TCNAMESIZ]; /* qsch type: bfifo, tbf, ... */ @@ -50,23 +54,18 @@ struct tc_qsch_param { } qopt; /* get only */ + int cls_cnt; + uint32_t flags; struct qsch_qstats qstats; struct qsch_bstats bstats; - /* master only, to fill stats from workers. 
*/ - struct qsch_qstats qstats_cpus[DPVS_MAX_LCORE]; - struct qsch_bstats bstats_cpus[DPVS_MAX_LCORE]; -} __attribute__((__packed__)); - -struct tc_qsch_stats { - struct qsch_qstats qstats; - struct qsch_bstats bstats; } __attribute__((__packed__)); /** * classifier section */ struct tc_cls_param { + lcoreid_t cid; tc_handle_t sch_id; /* ID of Qsch attached to */ tc_handle_t handle; /* or class-id */ char kind[TCNAMESIZ]; /* tc_cls type: "match", ... */ @@ -88,6 +87,7 @@ union tc_param { struct tc_conf { tc_obj_t obj; /* schedler, classifier, ... */ + uint32_t op_flags; /* TC_F_OPS_XXX */ char ifname[IFNAMSIZ]; union tc_param param; /* object specific parameters */ } __attribute__((__packed__)); @@ -102,6 +102,12 @@ static inline tc_handle_t tc_handle_atoi(const char *handle) if (sscanf(handle, "%x:", &maj) == 1) return (maj << 16); + if (!strncmp(handle, "root", 4)) + return TC_H_ROOT; + + if (!strncmp(handle, "ingress", 7)) + return TC_H_INGRESS; + return TC_H_UNSPEC; } diff --git a/include/ctrl.h b/include/ctrl.h index dedb7ca71..72f7df02a 100644 --- a/include/ctrl.h +++ b/include/ctrl.h @@ -195,7 +195,6 @@ int msg_dump(const struct dpvs_msg *msg, char *buf, int len); #define MSG_TYPE_BLKLST_ADD 9 #define MSG_TYPE_BLKLST_DEL 10 #define MSG_TYPE_STATS_GET 11 -#define MSG_TYPE_TC_STATS 13 #define MSG_TYPE_CONN_GET 14 #define MSG_TYPE_CONN_GET_ALL 15 #define MSG_TYPE_IPV6_STATS 16 @@ -209,10 +208,15 @@ int msg_dump(const struct dpvs_msg *msg, char *buf, int len); #define MSG_TYPE_IFA_SYNC 24 #define MSG_TYPE_WHTLST_ADD 25 #define MSG_TYPE_WHTLST_DEL 26 +#define MSG_TYPE_TC_QSCH_GET 27 +#define MSG_TYPE_TC_QSCH_SET 28 +#define MSG_TYPE_TC_CLS_GET 29 +#define MSG_TYPE_TC_CLS_SET 30 +#define MSG_TYPE_IPVS_RANGE_START 100 /* for svc per_core, refer to service.h*/ enum { - MSG_TYPE_SVC_SET_FLUSH = MSG_TYPE_WHTLST_DEL + 1, + MSG_TYPE_SVC_SET_FLUSH = MSG_TYPE_IPVS_RANGE_START, MSG_TYPE_SVC_SET_ZERO, MSG_TYPE_SVC_SET_ADD, MSG_TYPE_SVC_SET_EDIT, diff --git 
a/include/global_data.h b/include/global_data.h index 05b5fbfe1..19eec347c 100644 --- a/include/global_data.h +++ b/include/global_data.h @@ -56,6 +56,12 @@ extern dpvs_lcore_role_t g_lcore_role[DPVS_MAX_LCORE]; * */ extern int g_lcore_index[DPVS_MAX_LCORE]; extern int g_lcore_num; +extern lcoreid_t g_master_lcore_id; +extern lcoreid_t g_kni_lcore_id; +extern uint8_t g_slave_lcore_num; +extern uint8_t g_isol_rx_lcore_num; +extern uint64_t g_slave_lcore_mask; +extern uint64_t g_isol_rx_lcore_mask; int global_data_init(void); int global_data_term(void); diff --git a/include/linux_ipv6.h b/include/linux_ipv6.h index 05bc4ae3b..c25baa319 100644 --- a/include/linux_ipv6.h +++ b/include/linux_ipv6.h @@ -266,6 +266,18 @@ static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr return memcmp(a1, a2, sizeof(struct in6_addr)); } +static inline int ipv6_addr_cmp_u128(const struct in6_addr *a1, const struct in6_addr *a2) +{ + uint8_t *p1, *p2; + + for (p1 = (uint8_t *)a1, p2 = (uint8_t *)a2; p1 - (uint8_t *)a1 < 16; p1++, p2++) { + if (*p1 != *p2) + return *p1 > *p2 ? 
1 : -1; + } + + return 0; +} + static inline bool ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, const struct in6_addr *a2) diff --git a/include/netif.h b/include/netif.h index fe180cdfb..3098d9bd9 100644 --- a/include/netif.h +++ b/include/netif.h @@ -248,7 +248,7 @@ struct netif_port { struct netif_kni kni; /* kni device */ union netif_bond *bond; /* bonding conf */ struct vlan_info *vlan_info; /* VLANs info for real device */ - struct netif_tc tc; /* traffic control */ + struct netif_tc tc[DPVS_MAX_LCORE]; /* traffic control */ struct netif_ops *netif_ops; } __rte_cache_aligned; @@ -321,7 +321,7 @@ static inline void *netif_priv(struct netif_port *dev) static inline struct netif_tc *netif_tc(struct netif_port *dev) { - return &dev->tc; + return &dev->tc[rte_lcore_id()]; } static inline uint16_t dpvs_rte_eth_dev_count(void) diff --git a/include/tc/cls.h b/include/tc/cls.h index 293189a87..419acea72 100644 --- a/include/tc/cls.h +++ b/include/tc/cls.h @@ -57,7 +57,6 @@ struct tc_cls_ops { int (*dump)(struct tc_cls *cls, void *arg); struct list_head list; - rte_atomic32_t refcnt; }; /* classifier */ @@ -86,6 +85,8 @@ int tc_cls_change(struct tc_cls *cls, const void *arg); struct tc_cls *tc_cls_lookup(struct Qsch *sch, tc_handle_t handle); +tc_handle_t cls_alloc_handle(struct Qsch *sch); + #endif /* __DPVS__ */ #endif /* __DPVS_TC_CLS_H__ */ diff --git a/include/tc/sch.h b/include/tc/sch.h index 9f348e816..86942f9c7 100644 --- a/include/tc/sch.h +++ b/include/tc/sch.h @@ -26,6 +26,7 @@ #define __DPVS_TC_SCH_H__ #include #include "conf/common.h" +#include "tc.h" #ifdef __DPVS__ #include "dpdk.h" #include "timer.h" @@ -67,7 +68,6 @@ struct Qsch_ops { /* internal use */ struct list_head list; /* global sch ops list */ - rte_atomic32_t refcnt; }; /* queue scheduler, see kernel Qdisc */ @@ -79,23 +79,17 @@ struct Qsch { int cls_cnt; struct hlist_node hlist; /* netif_tc.qsch_hash node */ struct netif_tc *tc; - rte_atomic32_t refcnt; + uint32_t 
refcnt; uint32_t limit; - struct tc_mbuf_head q[DPVS_MAX_LCORE]; + struct tc_mbuf_head q; uint32_t flags; struct Qsch_ops *ops; /* per-lcore statistics */ - struct qsch_qstats qstats[DPVS_MAX_LCORE]; - struct qsch_bstats bstats[DPVS_MAX_LCORE]; - - struct dpvs_timer rc_timer; - -#define this_q q[rte_lcore_id()] -#define this_qstats qstats[rte_lcore_id()] -#define this_bstats bstats[rte_lcore_id()] + struct qsch_qstats qstats; + struct qsch_bstats bstats; }; struct qsch_rate { @@ -130,12 +124,11 @@ static inline uint64_t qsch_t2l_ns(const struct qsch_rate *rate, uint64_t ns) static inline int qsch_drop(struct Qsch *sch, struct rte_mbuf *mbuf) { rte_pktmbuf_free(mbuf); - sch->this_qstats.drops++; + sch->qstats.drops++; return EDPVS_DROP; } -static inline int __qsch_enqueue_tail(struct Qsch *sch, struct rte_mbuf *mbuf, - struct tc_mbuf_head *qh) +static inline int __qsch_enqueue_tail(struct Qsch *sch, struct rte_mbuf *mbuf, struct tc_mbuf_head *q) { struct tc_mbuf *tm; assert(sch && sch->tc && sch->tc->tc_mbuf_pool && mbuf); @@ -147,88 +140,96 @@ static inline int __qsch_enqueue_tail(struct Qsch *sch, struct rte_mbuf *mbuf, } tm->mbuf = mbuf; - list_add_tail(&tm->list, &qh->mbufs); - qh->qlen++; - sch->this_qstats.backlog += mbuf->pkt_len; - sch->this_qstats.qlen++; + list_add_tail(&tm->list, &q->mbufs); + q->qlen++; return EDPVS_OK; } static inline int qsch_enqueue_tail(struct Qsch *sch, struct rte_mbuf *mbuf) { - return __qsch_enqueue_tail(sch, mbuf, &sch->this_q); + int err; + + err = __qsch_enqueue_tail(sch, mbuf, &sch->q); + if (unlikely(err != EDPVS_OK)) + return err; + + sch->qstats.backlog += mbuf->pkt_len; + sch->qstats.qlen++; + + return EDPVS_OK; } -static inline struct rte_mbuf *__qsch_dequeue_head(struct Qsch *sch, - struct tc_mbuf_head *qh) +static inline struct rte_mbuf *__qsch_dequeue_head(struct Qsch *sch, struct tc_mbuf_head *q) { struct tc_mbuf *tm; struct rte_mbuf *mbuf; - tm = list_first_entry(&qh->mbufs, struct tc_mbuf, list); + tm = 
list_first_entry(&q->mbufs, struct tc_mbuf, list); if (unlikely(!tm)) return NULL; list_del(&tm->list); mbuf = tm->mbuf; - qh->qlen--; - sch->this_qstats.backlog -= mbuf->pkt_len; - sch->this_bstats.packets += 1; - sch->this_bstats.bytes += mbuf->pkt_len; - sch->this_qstats.qlen--; - + q->qlen--; rte_mempool_put(sch->tc->tc_mbuf_pool, tm); + return mbuf; } static inline struct rte_mbuf *qsch_dequeue_head(struct Qsch *sch) { - return __qsch_dequeue_head(sch, &sch->this_q); + struct rte_mbuf *mbuf; + + mbuf = __qsch_dequeue_head(sch, &sch->q); + if (unlikely(!mbuf)) + return NULL; + + sch->qstats.qlen--; + sch->qstats.backlog -= mbuf->pkt_len; + sch->bstats.packets++; + sch->bstats.bytes += mbuf->pkt_len; + + return mbuf; } static inline struct rte_mbuf *qsch_peek_head(struct Qsch *sch) { struct tc_mbuf *tm; - tm = list_first_entry(&sch->this_q.mbufs, struct tc_mbuf, list); + tm = list_first_entry(&sch->q.mbufs, struct tc_mbuf, list); if (unlikely(!tm)) return NULL; return tm->mbuf; } -static inline void __qsch_reset_queue(struct Qsch *sch, - struct tc_mbuf_head *qh) +static inline void __qsch_reset_queue(struct Qsch *sch, struct tc_mbuf_head *q) { struct tc_mbuf *tm, *n; - list_for_each_entry_safe(tm, n, &qh->mbufs, list) { + list_for_each_entry_safe(tm, n, &q->mbufs, list) { + list_del(&tm->list); qsch_drop(sch, tm->mbuf); rte_mempool_put(sch->tc->tc_mbuf_pool, tm); } - INIT_LIST_HEAD(&qh->mbufs); - qh->qlen = 0; - sch->this_qstats.qlen = 0; - sch->this_qstats.backlog = 0; + INIT_LIST_HEAD(&q->mbufs); + q->qlen = 0; } static inline void qsch_reset_queue(struct Qsch *sch) { - return __qsch_reset_queue(sch, &sch->this_q); + __qsch_reset_queue(sch, &sch->q); + sch->qstats.qlen = 0; + sch->qstats.backlog = 0; } /* Qsch APIs */ struct Qsch *qsch_create(struct netif_port *dev, const char *kind, tc_handle_t parent, tc_handle_t handle, const void *arg, int *errp); -struct Qsch *qsch_create_dflt(struct netif_port *dev, struct Qsch_ops *ops, - tc_handle_t parent); void 
qsch_destroy(struct Qsch *sch); int qsch_change(struct Qsch *sch, const void *arg); -void qsch_reset(struct Qsch *sch); -void qsch_stats(struct Qsch *sch, struct qsch_qstats *qstats, - struct qsch_bstats *bstats); void qsch_hash_add(struct Qsch *sch, bool invisible); void qsch_hash_del(struct Qsch *sch); @@ -236,20 +237,37 @@ void qsch_hash_del(struct Qsch *sch); struct Qsch *qsch_lookup(const struct netif_tc *tc, tc_handle_t handle); struct Qsch *qsch_lookup_noref(const struct netif_tc *tc, tc_handle_t handle); void qsch_do_sched(struct Qsch *sch); +tc_handle_t sch_alloc_handle(struct netif_tc *tc); + +static inline void sch_free(struct Qsch *sch) +{ + rte_free(sch); +} static inline void qsch_get(struct Qsch *sch) { - rte_atomic32_inc(&sch->refcnt); + sch->refcnt++; } static inline void qsch_put(struct Qsch *sch) { - rte_atomic32_dec(&sch->refcnt); + struct Qsch_ops *ops = sch->ops; + + if (--sch->refcnt > 0) + return; + + if (ops->reset) + ops->reset(sch); + if (ops->destroy) + ops->destroy(sch); + + sch_free(sch); } -int fifo_set_limit(struct Qsch *sch, unsigned int limit); -struct Qsch *fifo_create_dflt(struct Qsch *sch, struct Qsch_ops *ops, - unsigned int limit); +void *qsch_shm_get_or_create(struct Qsch *sch, uint32_t len); +int qsch_shm_put_or_destroy(struct Qsch *sch); +int qsch_shm_init(void); +int qsch_shm_term(void); #endif /* __DPVS__ */ diff --git a/include/tc/tc.h b/include/tc/tc.h index fbf5cddd9..600fa8ccb 100644 --- a/include/tc/tc.h +++ b/include/tc/tc.h @@ -39,7 +39,7 @@ typedef uint32_t tc_handle_t; #define TC #define RTE_LOGTYPE_TC RTE_LOGTYPE_USER1 -#define TC_ALIGNTO 64 +#define TC_ALIGNTO RTE_CACHE_LINE_SIZE #define TC_ALIGN(len) (((len) + TC_ALIGNTO-1) & ~(TC_ALIGNTO-1)) /* need a wrapper to save mbuf list, @@ -58,7 +58,6 @@ struct tc_mbuf { struct netif_tc { struct netif_port *dev; struct rte_mempool *tc_mbuf_pool; - rte_rwlock_t lock; /* * Qsch section @@ -80,22 +79,20 @@ struct Qsch_ops; struct tc_cls_ops; int tc_init(void); +int 
tc_term(void); int tc_ctrl_init(void); +int tc_ctrl_term(void); int tc_init_dev(struct netif_port *dev); int tc_destroy_dev(struct netif_port *dev); int tc_register_qsch(struct Qsch_ops *ops); int tc_unregister_qsch(struct Qsch_ops *ops); - struct Qsch_ops *tc_qsch_ops_lookup(const char *name); -void tc_qsch_ops_get(struct Qsch_ops *ops); -void tc_qsch_ops_put(struct Qsch_ops *ops); int tc_register_cls(struct tc_cls_ops *ops); int tc_unregister_cls(struct tc_cls_ops *ops); -struct tc_cls_ops *tc_cls_ops_get(const char *name); -void tc_cls_ops_put(struct tc_cls_ops *ops); +struct tc_cls_ops *tc_cls_ops_lookup(const char *name); struct rte_mbuf *tc_handle_egress(struct netif_tc *tc, struct rte_mbuf *mbuf, int *ret); diff --git a/src/global_data.c b/src/global_data.c index 7fa92944a..45b30dfd6 100644 --- a/src/global_data.c +++ b/src/global_data.c @@ -22,9 +22,17 @@ RTE_DEFINE_PER_LCORE(uint32_t, g_dpvs_poll_tick); uint64_t g_cycles_per_sec; + dpvs_lcore_role_t g_lcore_role[DPVS_MAX_LCORE]; int g_lcore_index[DPVS_MAX_LCORE]; + int g_lcore_num; +lcoreid_t g_master_lcore_id; +lcoreid_t g_kni_lcore_id = 0; /* By default g_kni_lcore_id is 0 and it indicates KNI core is not configured. 
*/ +uint8_t g_slave_lcore_num; +uint8_t g_isol_rx_lcore_num; +uint64_t g_slave_lcore_mask; +uint64_t g_isol_rx_lcore_mask; int global_data_init(void) { diff --git a/src/ipv4.c b/src/ipv4.c index 2c054b3c7..392bf6fc3 100644 --- a/src/ipv4.c +++ b/src/ipv4.c @@ -239,9 +239,6 @@ static int ipv4_output_fin2(struct rte_mbuf *mbuf) mbuf->packet_type = ETHER_TYPE_IPv4; mbuf->l3_len = ip4_hdrlen(mbuf); - /* reuse @userdata/@udata64 for prio (used by tc:pfifo_fast) */ - mbuf->udata64 = ((ip4_hdr(mbuf)->type_of_service >> 1) & 15); - err = neigh_output(AF_INET, (union inet_addr *)&nexthop, mbuf, rt->port); route4_put(rt); return err; diff --git a/src/ipvs/ip_vs_conn.c b/src/ipvs/ip_vs_conn.c index 384cd60a9..d527021f9 100644 --- a/src/ipvs/ip_vs_conn.c +++ b/src/ipvs/ip_vs_conn.c @@ -35,6 +35,7 @@ #include "ctrl.h" #include "conf/conn.h" #include "sys_time.h" +#include "global_data.h" #define DPVS_CONN_TBL_BITS 20 #define DPVS_CONN_TBL_SIZE (1 << DPVS_CONN_TBL_BITS) @@ -1239,8 +1240,6 @@ struct ip_vs_conn_array_list { ipvs_conn_entry_t array[0]; }; -static uint8_t g_slave_lcore_nb; -static uint64_t g_slave_lcore_mask; static struct list_head conn_to_dump; static inline char* get_conn_state_name(uint16_t proto, uint16_t state) @@ -1737,7 +1736,6 @@ static int conn_ctrl_init(void) int err; INIT_LIST_HEAD(&conn_to_dump); - netif_get_slave_lcores(&g_slave_lcore_nb, &g_slave_lcore_mask); if ((err = register_conn_get_msg()) != EDPVS_OK) return err; diff --git a/src/main.c b/src/main.c index 6e5021d47..bff5caca7 100644 --- a/src/main.c +++ b/src/main.c @@ -77,13 +77,13 @@ extern int log_slave_init(void); DPVS_MODULE(MODULE_TIMER, "timer", \ dpvs_timer_init, dpvs_timer_term), \ DPVS_MODULE(MODULE_TC, "tc", \ - tc_init, NULL), \ + tc_init, tc_term), \ DPVS_MODULE(MODULE_NETIF, "netif", \ netif_init, netif_term), \ - DPVS_MODULE(MODULE_CTRL, "cp", \ + DPVS_MODULE(MODULE_CTRL, "ctrl", \ ctrl_init, ctrl_term), \ - DPVS_MODULE(MODULE_TC_CTRL, "tc cp", \ - tc_ctrl_init, NULL), \ + 
DPVS_MODULE(MODULE_TC_CTRL, "tc_ctrl", \ + tc_ctrl_init, tc_ctrl_term), \ DPVS_MODULE(MODULE_VLAN, "vlan", \ vlan_init, NULL), \ DPVS_MODULE(MODULE_INET, "inet", \ diff --git a/src/netif.c b/src/netif.c index e839cdcfe..8407ed576 100644 --- a/src/netif.c +++ b/src/netif.c @@ -47,12 +47,12 @@ #define NETIF_PKTPOOL_NB_MBUF_DEF 65535 #define NETIF_PKTPOOL_NB_MBUF_MIN 1023 #define NETIF_PKTPOOL_NB_MBUF_MAX 134217727 -static int netif_pktpool_nb_mbuf = NETIF_PKTPOOL_NB_MBUF_DEF; +int netif_pktpool_nb_mbuf = NETIF_PKTPOOL_NB_MBUF_DEF; #define NETIF_PKTPOOL_MBUF_CACHE_DEF 256 #define NETIF_PKTPOOL_MBUF_CACHE_MIN 32 #define NETIF_PKTPOOL_MBUF_CACHE_MAX 8192 -static int netif_pktpool_mbuf_cache = NETIF_PKTPOOL_MBUF_CACHE_DEF; +int netif_pktpool_mbuf_cache = NETIF_PKTPOOL_MBUF_CACHE_DEF; #define NETIF_NB_RX_DESC_DEF 256 #define NETIF_NB_RX_DESC_MIN 16 @@ -147,15 +147,6 @@ static struct list_head port_ntab[NETIF_PORT_TABLE_BUCKETS]; /* hashed by name * static void kni_ingress(struct rte_mbuf *mbuf, struct netif_port *dev); static void kni_lcore_loop(void *dummy); - -lcoreid_t g_master_lcore_id; -/* By default g_kni_lcore_id is 0 and it indicates KNI core is not configured. 
*/ -lcoreid_t g_kni_lcore_id = 0; -static uint8_t g_slave_lcore_num; -static uint8_t g_isol_rx_lcore_num; -static uint64_t g_slave_lcore_mask; -static uint64_t g_isol_rx_lcore_mask; - bool dp_vs_fdir_filter_enable = true; bool is_lcore_id_valid(lcoreid_t cid) diff --git a/src/tc/cls.c b/src/tc/cls.c index d5ffd51fa..d05be6b64 100644 --- a/src/tc/cls.c +++ b/src/tc/cls.c @@ -26,7 +26,8 @@ #include "tc/sch.h" #include "tc/cls.h" -static inline tc_handle_t cls_alloc_handle(struct Qsch *sch) +/* for master lcore only */ +tc_handle_t cls_alloc_handle(struct Qsch *sch) { int i = 0x8000; static uint32_t autohandle = TC_H_MAKE(0x80000000U, 0); @@ -37,7 +38,7 @@ static inline tc_handle_t cls_alloc_handle(struct Qsch *sch) autohandle = TC_H_MAKE(0x80000000U, 0); if (!tc_cls_lookup(sch, autohandle)) return autohandle; - } while (--i > 0); + } while (--i > 0); return 0; } @@ -71,31 +72,33 @@ struct tc_cls *tc_cls_create(struct Qsch *sch, const char *kind, { struct tc_cls_ops *ops = NULL; struct tc_cls *cls = NULL; - int err; + int err = EDPVS_INVAL; assert(sch && kind && errp); - err = EDPVS_NOTSUPP; - ops = tc_cls_ops_get(kind); - if (!ops) + /* handle must be set */ + if (unlikely(!handle)) { + err = EDPVS_INVAL; goto errout; + } - cls = cls_alloc(sch, ops); - if (!cls) + ops = tc_cls_ops_lookup(kind); + if (!ops) { + err = EDPVS_NOTSUPP; goto errout; - - if (handle == 0) { - handle = cls_alloc_handle(sch); - if (handle == 0) - goto errout; } - /* must check since handle may not zero. 
*/ if (tc_cls_lookup(sch, handle)) { err = EDPVS_EXIST; goto errout; } + cls = cls_alloc(sch, ops); + if (!cls) { + err = EDPVS_NOMEM; + goto errout; + } + cls->handle = handle; cls->prio = prio; if (pkt_type) @@ -126,8 +129,8 @@ struct tc_cls *tc_cls_create(struct Qsch *sch, const char *kind, return cls; errout: - if (ops) - tc_cls_ops_put(ops); + if (cls) + cls_free(cls); *errp = err; return NULL; } @@ -143,7 +146,6 @@ void tc_cls_destroy(struct tc_cls *cls) if (ops->destroy) ops->destroy(cls); - tc_cls_ops_put(ops); cls_free(cls); } diff --git a/src/tc/cls_match.c b/src/tc/cls_match.c index 5c7e307e0..cf2e221bb 100644 --- a/src/tc/cls_match.c +++ b/src/tc/cls_match.c @@ -22,6 +22,7 @@ */ #include #include +#include #include #include #include "netif.h" @@ -29,6 +30,8 @@ #include "tc/tc.h" #include "tc/sch.h" #include "tc/cls.h" +#include "linux_ipv6.h" +#include "ipv6.h" #include "conf/match.h" #include "conf/tc.h" @@ -48,8 +51,10 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, struct dp_vs_match *m = &priv->match; struct ether_hdr *eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); struct iphdr *iph = NULL; + struct ip6_hdr *ip6h = NULL; struct tcphdr *th; struct udphdr *uh; + uint8_t l4_proto = 0; int offset = sizeof(*eh); __be16 pkt_type = eh->ether_type; __be16 sport, dport; @@ -77,6 +82,9 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, l2parse: switch (ntohs(pkt_type)) { case ETH_P_IP: + if (m->af != AF_INET && m->af != AF_UNSPEC) + goto done; + if (mbuf_may_pull(mbuf, offset + sizeof(struct iphdr)) != 0) { err = TC_ACT_SHOT; goto done; @@ -97,9 +105,39 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, goto done; } + l4_proto = iph->protocol; offset += (iph->ihl << 2); break; + case ETH_P_IPV6: + if (m->af != AF_INET6 && m->af != AF_UNSPEC) + goto done; + if (mbuf_may_pull(mbuf, offset + sizeof(struct ip6_hdr)) != 0) { + err = TC_ACT_SHOT; + goto done; + } + + ip6h = 
rte_pktmbuf_mtod_offset(mbuf, struct ip6_hdr *, offset); + if (!ipv6_addr_any(&m->srange.max_addr.in6)) { + if (ipv6_addr_cmp_u128(&ip6h->ip6_src, &m->srange.min_addr.in6) < 0 || + ipv6_addr_cmp_u128(&ip6h->ip6_src, &m->srange.max_addr.in6) > 0) + goto done; + } + + if (!ipv6_addr_any(&m->srange.max_addr.in6)) { + if (ipv6_addr_cmp_u128(&ip6h->ip6_dst, &m->srange.min_addr.in6) < 0 || + ipv6_addr_cmp_u128(&ip6h->ip6_dst, &m->srange.max_addr.in6) > 0) + goto done; + } + break; + + l4_proto = ip6h->ip6_nxt; + offset = ip6_skip_exthdr(mbuf, offset + sizeof(struct ip6_hdr), &l4_proto); + if (offset < 0) { + err = TC_ACT_SHOT; + goto done; + } + case ETH_P_8021Q: veh = (struct vlan_ethhdr *)eh; pkt_type = veh->h_vlan_encapsulated_proto; @@ -111,10 +149,10 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, } /* check if protocol matches */ - if (priv->proto && priv->proto != iph->protocol) + if (priv->proto && l4_proto && priv->proto != l4_proto) goto done; - switch (iph->protocol) { + switch (l4_proto) { case IPPROTO_TCP: if (mbuf_may_pull(mbuf, offset + sizeof(struct tcphdr)) != 0) { err = TC_ACT_SHOT; @@ -161,17 +199,22 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, done: #if defined(CONFIG_TC_DEBUG) - if (iph) { + if (iph || ip6h) { char sip[64], dip[64]; char cls_id[16], qsch_id[16]; - inet_ntop(AF_INET, &iph->saddr, sip, sizeof(sip)); - inet_ntop(AF_INET, &iph->daddr, dip, sizeof(dip)); + if (ip6h) { + inet_ntop(AF_INET6, &ip6h->saddr, sip, sizeof(sip)); + inet_ntop(AF_INET6, &ip6h->daddr, dip, sizeof(dip)); + } else { + inet_ntop(AF_INET, &iph->saddr, sip, sizeof(sip)); + inet_ntop(AF_INET, &iph->daddr, dip, sizeof(dip)); + } tc_handle_itoa(cls->handle, cls_id, sizeof(cls_id)); tc_handle_itoa(priv->result.sch_id, qsch_id, sizeof(qsch_id)); RTE_LOG(DEBUG, TC, "cls %s %s %s:%u -> %s:%u %s %s\n", - cls_id, inet_proto_name(iph->protocol), + cls_id, inet_proto_name(l4_proto), sip, sport, dip, dport, (err == TC_ACT_OK ? 
"target" : "miss"), (err == TC_ACT_OK ? \ @@ -193,15 +236,35 @@ static int match_init(struct tc_cls *cls, const void *arg) if (copt->proto) priv->proto = copt->proto; + if (copt->match.af) + priv->match.af = copt->match.af; + if (strlen(copt->match.iifname)) snprintf(priv->match.iifname, IFNAMSIZ, "%s", copt->match.iifname); if (strlen(copt->match.oifname)) snprintf(priv->match.oifname, IFNAMSIZ, "%s", copt->match.oifname); - if (ntohl(copt->match.srange.max_addr.in.s_addr) != INADDR_ANY) { - priv->match.srange.min_addr = copt->match.srange.min_addr; - priv->match.srange.max_addr = copt->match.srange.max_addr; + if (copt->match.af == AF_INET6) { + if (!ipv6_addr_any(&copt->match.srange.max_addr.in6)) { + priv->match.srange.min_addr = copt->match.srange.min_addr; + priv->match.srange.max_addr = copt->match.srange.max_addr; + } + + if (!ipv6_addr_any(&copt->match.drange.max_addr.in6)) { + priv->match.drange.min_addr = copt->match.drange.min_addr; + priv->match.drange.max_addr = copt->match.drange.max_addr; + } + } else { /* ipv4 by default */ + if (ntohl(copt->match.srange.max_addr.in.s_addr) != INADDR_ANY) { + priv->match.srange.min_addr = copt->match.srange.min_addr; + priv->match.srange.max_addr = copt->match.srange.max_addr; + } + + if (ntohl(copt->match.drange.max_addr.in.s_addr) != INADDR_ANY) { + priv->match.drange.min_addr = copt->match.drange.min_addr; + priv->match.drange.max_addr = copt->match.drange.max_addr; + } } if (ntohs(copt->match.srange.max_port)) { @@ -209,11 +272,6 @@ static int match_init(struct tc_cls *cls, const void *arg) priv->match.srange.max_port = copt->match.srange.max_port; } - if (ntohl(copt->match.drange.max_addr.in.s_addr) != INADDR_ANY) { - priv->match.drange.min_addr = copt->match.drange.min_addr; - priv->match.drange.max_addr = copt->match.drange.max_addr; - } - if (ntohs(copt->match.drange.max_port)) { priv->match.drange.min_port = copt->match.drange.min_port; priv->match.drange.max_port = copt->match.drange.max_port; @@ -222,7 
+280,7 @@ static int match_init(struct tc_cls *cls, const void *arg) if (copt->result.drop) { priv->result.drop = copt->result.drop; } else { - /* 0: (TC_H_UNSPEC) is valid handle but not valid target */ + /* 0: (TC_H_UNSPEC) is not valid target */ if (copt->result.sch_id != TC_H_UNSPEC) { priv->result.sch_id = copt->result.sch_id; priv->result.drop = false; /* exclusive with sch_id */ diff --git a/src/tc/sch_fifo.c b/src/tc/sch_fifo.c index 615a56c36..8ff79b61b 100644 --- a/src/tc/sch_fifo.c +++ b/src/tc/sch_fifo.c @@ -30,7 +30,7 @@ extern struct Qsch_ops bfifo_sch_ops; static int pfifo_enqueue(struct Qsch *sch, struct rte_mbuf *mbuf) { - if (likely(sch->this_q.qlen < sch->limit)) + if (likely(sch->q.qlen < sch->limit)) return qsch_enqueue_tail(sch, mbuf); #if defined(CONFIG_TC_DEBUG) @@ -41,7 +41,7 @@ static int pfifo_enqueue(struct Qsch *sch, struct rte_mbuf *mbuf) static int bfifo_enqueue(struct Qsch *sch, struct rte_mbuf *mbuf) { - if (likely(sch->this_qstats.backlog + mbuf->pkt_len <= sch->limit)) + if (likely(sch->qstats.backlog + mbuf->pkt_len <= sch->limit)) return qsch_enqueue_tail(sch, mbuf); #if defined(CONFIG_TC_DEBUG) @@ -68,7 +68,7 @@ static int fifo_init(struct Qsch *sch, const void *arg) #if 0 limit = dev->txq_desc_nb; #else - limit = 128; + limit = 1024; #endif if (is_bfifo) @@ -113,32 +113,3 @@ struct Qsch_ops bfifo_sch_ops = { .reset = qsch_reset_queue, .dump = fifo_dump, }; - -int fifo_set_limit(struct Qsch *sch, unsigned int limit) -{ - struct tc_fifo_qopt qopt = { .limit = limit }; - - if (strncmp(sch->ops->name + 1, "fifo", 4) != 0) - return EDPVS_INVAL; - - return sch->ops->change(sch, &qopt); -} - -struct Qsch *fifo_create_dflt(struct Qsch *sch, struct Qsch_ops *ops, - unsigned int limit) -{ - struct Qsch *q; - int err; - - q = qsch_create_dflt(qsch_dev(sch), ops, sch->handle); - if (!q) - return NULL; - - err = fifo_set_limit(q, limit); - if (err != EDPVS_OK) { - qsch_destroy(q); - return NULL; - } - - return q; -} diff --git 
a/src/tc/sch_generic.c b/src/tc/sch_generic.c index b1cd605d9..81d36eb15 100644 --- a/src/tc/sch_generic.c +++ b/src/tc/sch_generic.c @@ -29,18 +29,12 @@ /* may configurable in the future. */ static int dev_tx_weight = 64; -static int qsch_recycle_timeout = 5; static inline int sch_hash(tc_handle_t handle, int hash_size) { return handle % hash_size; } -static inline int sch_qlen(struct Qsch *sch) -{ - return sch->this_q.qlen; -} - /* return current queue length (num of packets in queue), * or 0 if queue is empty or throttled. */ static inline int sch_dequeue_xmit(struct Qsch *sch, int *npkt) @@ -53,71 +47,30 @@ static inline int sch_dequeue_xmit(struct Qsch *sch, int *npkt) return 0; netif_hard_xmit(mbuf, netif_port_get(mbuf->port)); - return sch_qlen(sch); + return sch->q.qlen; } static inline struct Qsch *sch_alloc(struct netif_tc *tc, struct Qsch_ops *ops) { struct Qsch *sch; unsigned int size = TC_ALIGN(sizeof(*sch)) + ops->priv_size; - lcoreid_t cid; sch = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE); if (!sch) return NULL; - for (cid = 0; cid < NELEMS(sch->q); cid++) - tc_mbuf_head_init(&sch->q[cid]); - + tc_mbuf_head_init(&sch->q); INIT_LIST_HEAD(&sch->cls_list); INIT_HLIST_NODE(&sch->hlist); + sch->tc = tc; sch->ops = ops; - rte_atomic32_set(&sch->refcnt, 1); + sch->refcnt = 1; return sch; } -static inline void sch_free(struct Qsch *sch) -{ - rte_free(sch); -} - -static void __qsch_destroy(struct Qsch *sch) -{ - struct Qsch_ops *ops = sch->ops; - - if (ops->reset) - ops->reset(sch); - if (ops->destroy) - ops->destroy(sch); - - tc_qsch_ops_put(ops); - sch_free(sch); -} - -static int sch_recycle(void *arg) -{ - struct Qsch *sch = arg; - - if (rte_atomic32_read(&sch->refcnt)) { - dpvs_timer_reset_nolock(&sch->rc_timer, true); - RTE_LOG(WARNING, TC, "%s: sch %u is in use.\n", __func__, sch->handle); - return DTIMER_OK; - } - - __qsch_destroy(sch); - return DTIMER_STOP; -} - -static void sch_dying(struct Qsch *sch) -{ - struct timeval timeout = { 
qsch_recycle_timeout, 0 }; - - dpvs_timer_sched(&sch->rc_timer, &timeout, sch_recycle, sch, true); -} - -static inline tc_handle_t sch_alloc_handle(struct netif_port *dev) +tc_handle_t sch_alloc_handle(struct netif_tc *tc) { int i = 0x8000; static uint32_t autohandle = TC_H_MAKE(0x80000000U, 0); @@ -126,9 +79,9 @@ static inline tc_handle_t sch_alloc_handle(struct netif_port *dev) autohandle += TC_H_MAKE(0x10000U, 0); if (autohandle == TC_H_MAKE(TC_H_ROOT, 0)) autohandle = TC_H_MAKE(0x80000000U, 0); - if (!qsch_lookup_noref(&dev->tc, autohandle)) + if (!qsch_lookup_noref(tc, autohandle)) return autohandle; - } while (--i > 0); + } while (--i > 0); return 0; } @@ -139,61 +92,56 @@ struct Qsch *qsch_create(struct netif_port *dev, const char *kind, { int err; struct Qsch_ops *ops = NULL; - struct Qsch *sch = NULL; + struct Qsch *sch = NULL, *psch = NULL; struct netif_tc *tc = netif_tc(dev); assert(dev && kind && errp); - err = EDPVS_NOTSUPP; + err = EDPVS_INVAL; ops = tc_qsch_ops_lookup(kind); - if (!ops) + if (!ops) { + err = EDPVS_NOTSUPP; goto errout; + } - err = EDPVS_NOMEM; sch = sch_alloc(tc, ops); - if (!sch) + if (!sch) { + err = EDPVS_NOMEM; goto errout; + } + if (parent != 0) { + psch = qsch_lookup_noref(tc, parent); + if (!psch) { + err = EDPVS_NOTEXIST; + goto errout; + } + } sch->parent = parent; if (handle == TC_H_INGRESS) { sch->flags |= QSCH_F_INGRESS; - handle = TC_H_MAKE(TC_H_INGRESS, 0); - - /* already exist ? */ if (tc->qsch_ingress) { err = EDPVS_EXIST; goto errout; } - } else { /* egress */ - struct Qsch *q; - - if (handle == 0) { - handle = sch_alloc_handle(dev); - if (!handle) - goto errout; - } - - /* already exist ? 
*/ - q = qsch_lookup_noref(tc, handle); - if (q) { + } else if (handle == TC_H_ROOT) { + if (tc->qsch) { err = EDPVS_EXIST; goto errout; } - - /* if use this API, parent must not be root - * and must be exist */ - if (parent == TC_H_ROOT) { + } else { + if (handle == 0 || parent == 0) { err = EDPVS_INVAL; goto errout; - } else { - q = qsch_lookup_noref(tc, parent); - if (!q) { - err = EDPVS_NOTEXIST; - goto errout; - } } - } + sch->flags |= (psch->flags & QSCH_F_INGRESS); + + if (qsch_lookup_noref(tc, handle)) { + err = EDPVS_EXIST; + goto errout; + } + } sch->handle = handle; if (ops->init && (err = ops->init(sch, arg)) != EDPVS_OK) { @@ -202,69 +150,22 @@ struct Qsch *qsch_create(struct netif_port *dev, const char *kind, goto errout; } - if (sch->flags & QSCH_F_INGRESS) { - tc->qsch_ingress = sch; - sch->tc->qsch_cnt++; - } else - qsch_hash_add(sch, false); + qsch_hash_add(sch, false); + *errp = EDPVS_OK; return sch; errout: if (sch) sch_free(sch); - if (ops) - tc_qsch_ops_put(ops); *errp = err; return NULL; } -struct Qsch *qsch_create_dflt(struct netif_port *dev, struct Qsch_ops *ops, - tc_handle_t parent) -{ - int err; - struct Qsch *sch; - assert(dev && ops); - - tc_qsch_ops_get(ops); - - sch = sch_alloc(&dev->tc, ops); - if (!sch) { - tc_qsch_ops_put(ops); - return NULL; - } - - sch->parent = parent; - - if (ops->init && (err = ops->init(sch, NULL)) != EDPVS_OK) { - tc_qsch_ops_put(ops); - qsch_destroy(sch); - return NULL; - } - - return sch; -} - void qsch_destroy(struct Qsch *sch) { - if (sch->flags & QSCH_F_INGRESS) { - assert(sch->tc->qsch_ingress == sch); - sch->tc->qsch_ingress = NULL; - sch->tc->qsch_cnt--; - } else if (sch == sch->tc->qsch) { - sch->tc->qsch = NULL; - sch->tc->qsch_cnt--; - } else { - qsch_hash_del(sch); - } - - if (!rte_atomic32_dec_and_test(&sch->refcnt)) { - RTE_LOG(WARNING, TC, "%s: sch %u is in use.\n", __func__, sch->handle); - sch_dying(sch); - return; - } - - __qsch_destroy(sch); + qsch_hash_del(sch); + qsch_put(sch); } int 
qsch_change(struct Qsch *sch, const void *arg) @@ -275,27 +176,20 @@ int qsch_change(struct Qsch *sch, const void *arg) return sch->ops->change(sch, arg); } -void qsch_reset(struct Qsch *sch) -{ - lcoreid_t cid; - - if (sch->ops->reset) - sch->ops->reset(sch); - - for (cid = 0; cid < NELEMS(sch->q); cid++) - sch->q[cid].qlen = 0; -} - void qsch_hash_add(struct Qsch *sch, bool invisible) { int hash; assert(sch && sch->tc && sch->tc->qsch_hash); - if (sch->parent == TC_H_ROOT || (sch->flags & QSCH_F_INGRESS)) - return; + if (sch->handle == TC_H_INGRESS) { + sch->tc->qsch_ingress = sch; + } else if (sch->handle == TC_H_ROOT) { + sch->tc->qsch= sch; + } else { + hash = sch_hash(sch->handle, sch->tc->qsch_hash_size); + hlist_add_head(&sch->hlist, &sch->tc->qsch_hash[hash]); + } - hash = sch_hash(sch->handle, sch->tc->qsch_hash_size); - hlist_add_head(&sch->hlist, &sch->tc->qsch_hash[hash]); sch->tc->qsch_cnt++; if (invisible) @@ -306,10 +200,14 @@ void qsch_hash_del(struct Qsch *sch) { assert(sch && sch->tc && sch->tc->qsch_hash); - if (sch->parent == TC_H_ROOT || (sch->flags & QSCH_F_INGRESS)) - return; + if (sch == sch->tc->qsch_ingress) { + sch->tc->qsch_ingress = NULL; + } else if (sch == sch->tc->qsch) { + sch->tc->qsch = NULL; + } else { + hlist_del_init(&sch->hlist); + } - hlist_del_init(&sch->hlist); sch->tc->qsch_cnt--; } @@ -319,18 +217,18 @@ struct Qsch *qsch_lookup_noref(const struct netif_tc *tc, tc_handle_t handle) struct Qsch *sch; assert(tc->qsch_hash && tc->qsch_hash_size); - if (likely(tc->qsch && tc->qsch->handle == handle)) + if (tc->qsch && tc->qsch->handle == handle) return tc->qsch; + if (tc->qsch_ingress && tc->qsch_ingress->handle == handle) + return tc->qsch_ingress; + hash = sch_hash(handle, tc->qsch_hash_size); hlist_for_each_entry(sch, &tc->qsch_hash[hash], hlist) { - if (likely(sch->handle == handle)) + if (sch->handle == handle) return sch; } - if (tc->qsch_ingress && tc->qsch_ingress->handle == handle) - return tc->qsch_ingress; - return 
NULL; } diff --git a/src/tc/sch_pfifo_fast.c b/src/tc/sch_pfifo_fast.c index 5e58fbbba..855bbfebd 100644 --- a/src/tc/sch_pfifo_fast.c +++ b/src/tc/sch_pfifo_fast.c @@ -25,6 +25,8 @@ #include #include #include "netif.h" +#include "vlan.h" +#include "ipv6.h" #include "tc/tc.h" #include "conf/tc.h" @@ -39,11 +41,8 @@ static const uint8_t prio2band[TC_PRIO_MAX + 1] = { static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0}; struct pfifo_fast_priv { - uint32_t bitmap[DPVS_MAX_LCORE]; - struct tc_mbuf_head q[DPVS_MAX_LCORE][PFIFO_FAST_BANDS]; - -#define this_bitmap bitmap[rte_lcore_id()] -#define this_pff_q q[rte_lcore_id()] + uint32_t bitmap; + struct tc_mbuf_head q[PFIFO_FAST_BANDS]; }; static inline struct tc_mbuf_head *band2list(struct pfifo_fast_priv *priv, @@ -51,15 +50,7 @@ static inline struct tc_mbuf_head *band2list(struct pfifo_fast_priv *priv, { assert(band >= 0 && band < PFIFO_FAST_BANDS); - return priv->this_pff_q + band; -} - -static inline struct tc_mbuf_head *band2list_cpu(struct pfifo_fast_priv *priv, - int band, lcoreid_t cid) -{ - assert(band >= 0 && band < PFIFO_FAST_BANDS); - - return priv->q[cid] + band; + return priv->q + band; } static int pfifo_fast_enqueue(struct Qsch *sch, struct rte_mbuf *mbuf) @@ -69,17 +60,44 @@ static int pfifo_fast_enqueue(struct Qsch *sch, struct rte_mbuf *mbuf) struct pfifo_fast_priv *priv; struct tc_mbuf_head *qh; + struct ether_hdr *eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + struct iphdr *iph = NULL; + struct ip6_hdr *ip6h = NULL; + struct vlan_ethhdr *veh; + int offset = sizeof(*eh); + __be16 pkt_type = eh->ether_type; + /* sch->limit is same as dev->txq_desc_nb */ - if (unlikely(sch->this_q.qlen >= sch->limit)) { + if (unlikely(sch->q.qlen >= sch->limit)) { #if defined(CONFIG_TC_DEBUG) RTE_LOG(WARNING, TC, "%s: queue is full.\n", __func__); #endif return qsch_drop(sch, mbuf); } - if (unlikely(mbuf->udata64 > 0 && mbuf->udata64 <= TC_PRIO_MAX && - mbuf->packet_type == ETH_P_IP)) - prio = 
(uint8_t)mbuf->udata64; +l2parse: + switch (ntohs(pkt_type)) { + case ETH_P_8021Q: + veh = (struct vlan_ethhdr *)eh; + pkt_type = veh->h_vlan_encapsulated_proto; + offset += VLAN_HLEN; + goto l2parse; + case ETH_P_IP: + if (unlikely(mbuf_may_pull(mbuf, offset + sizeof(struct iphdr)) != 0)) + break; + iph = rte_pktmbuf_mtod_offset(mbuf, struct iphdr *, offset); + prio = (iph->tos >> 1) & 0x0F; + break; + case ETH_P_IPV6: + if (unlikely(mbuf_may_pull(mbuf, offset + sizeof(struct ip6_hdr)) != 0)) + break; + ip6h = rte_pktmbuf_mtod_offset(mbuf, struct ip6_hdr *, offset); + prio = (rte_be_to_cpu_32(ip6h->ip6_flow) >> 21) & 0x0F; + break; + default: + prio = 0; + break; + } band = prio2band[prio]; priv = qsch_priv(sch); @@ -87,9 +105,9 @@ static int pfifo_fast_enqueue(struct Qsch *sch, struct rte_mbuf *mbuf) err = __qsch_enqueue_tail(sch, mbuf, qh); if (err == EDPVS_OK) { - priv->this_bitmap |= (1 << band); - sch->this_q.qlen++; - sch->this_qstats.qlen++; + priv->bitmap |= (1 << band); + sch->qstats.qlen++; + sch->qstats.backlog += mbuf->pkt_len; } return err; @@ -98,7 +116,7 @@ static int pfifo_fast_enqueue(struct Qsch *sch, struct rte_mbuf *mbuf) static struct rte_mbuf *pfifo_fast_dequeue(struct Qsch *sch) { struct pfifo_fast_priv *priv = qsch_priv(sch); - int band = bitmap2band[priv->this_bitmap]; + int band = bitmap2band[priv->bitmap]; struct tc_mbuf_head *qh; struct rte_mbuf *mbuf; @@ -109,12 +127,14 @@ static struct rte_mbuf *pfifo_fast_dequeue(struct Qsch *sch) mbuf = __qsch_dequeue_head(sch, qh); if (mbuf) { - sch->this_q.qlen--; - sch->this_qstats.qlen--; + sch->qstats.qlen--; + sch->qstats.backlog -= mbuf->pkt_len; + sch->bstats.packets++; + sch->bstats.bytes += mbuf->pkt_len; } if (likely(qh->qlen == 0)) - priv->this_bitmap &= ~(1 << band); + priv->bitmap &= ~(1 << band); return mbuf; } @@ -122,7 +142,7 @@ static struct rte_mbuf *pfifo_fast_dequeue(struct Qsch *sch) static struct rte_mbuf *pfifo_fast_peek(struct Qsch *sch) { struct pfifo_fast_priv *priv = 
qsch_priv(sch); - int band = bitmap2band[priv->this_bitmap]; + int band = bitmap2band[priv->bitmap]; struct tc_mbuf_head *qh; struct tc_mbuf *tm; @@ -130,23 +150,23 @@ static struct rte_mbuf *pfifo_fast_peek(struct Qsch *sch) return NULL; qh = band2list(priv, band); + if (unlikely(list_empty(&qh->mbufs))) + return NULL; + tm = list_first_entry(&qh->mbufs, struct tc_mbuf, list); if (tm) return tm->mbuf; - else - return NULL; + + return NULL; } static int pfifo_fast_init(struct Qsch *sch, const void *arg) { int band; - lcoreid_t cid; struct pfifo_fast_priv *priv = qsch_priv(sch); - for (cid = 0; cid < NELEMS(priv->q); cid++) { - for (band = 0; band < PFIFO_FAST_BANDS; band++) { - tc_mbuf_head_init(band2list_cpu(priv, band, cid)); - } + for (band = 0; band < PFIFO_FAST_BANDS; band++) { + tc_mbuf_head_init(band2list(priv, band)); } /* FIXME: txq_desc_nb is not set when alloc device. @@ -155,7 +175,7 @@ static int pfifo_fast_init(struct Qsch *sch, const void *arg) #if 0 sch->limit = qsch_dev(sch)->txq_desc_nb; #else - sch->limit = 128; + sch->limit = 1024; #endif return EDPVS_OK; } @@ -163,17 +183,15 @@ static int pfifo_fast_init(struct Qsch *sch, const void *arg) static void pfifo_fast_reset(struct Qsch *sch) { int band; - lcoreid_t cid; struct pfifo_fast_priv *priv = qsch_priv(sch); - for (cid = 0; cid < NELEMS(priv->q); cid++) { - for (band = 0; band < PFIFO_FAST_BANDS; band++) - __qsch_reset_queue(sch, band2list_cpu(priv, band, cid)); + for (band = 0; band < PFIFO_FAST_BANDS; band++) + __qsch_reset_queue(sch, band2list(priv, band)); - priv->bitmap[cid] = 0; - sch->q[cid].qlen = 0; - sch->qstats[cid].qlen = 0; - } + sch->qstats.qlen = 0; + sch->qstats.backlog = 0; + + priv->bitmap = 0; } static int pfifo_fast_dump(struct Qsch *sch, void *arg) diff --git a/src/tc/sch_shm.c b/src/tc/sch_shm.c new file mode 100644 index 000000000..181fa18e6 --- /dev/null +++ b/src/tc/sch_shm.c @@ -0,0 +1,161 @@ +/* + * DPVS is a software load balancer (Virtual Server) based on DPDK. 
+ * + * Copyright (C) 2021 iQIYI (www.iqiyi.com). + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * Qsch private data (global data) that's shared by all workers. + * + * Wenchao Yu , Mar. 2021, initial. + */ +#include +#include "netif.h" +#include "tc/sch.h" +#include "list.h" + +#define SCH_SHM_TABLE_BITS 8 +#define SCH_SHM_TABLE_SIZE (1 << SCH_SHM_TABLE_BITS) +#define SCH_SHM_TALBE_MASK ((SCH_SHM_TABLE_SIZE) - 1) + +struct sch_shm_obj { + struct list_head list; + portid_t portid; + tc_handle_t handle; + uint32_t refcnt; + uint32_t len; + char data[0]; +}; + +struct shm_hash_node { + struct list_head bucket; + rte_spinlock_t lock; +}; + +struct shm_hash_node *sch_shm_table; + +static inline int sch_shm_hash(portid_t pid, tc_handle_t handle) +{ + return (((uint16_t)pid + 419) ^ ((uint16_t)(TC_H_MAJ(handle) >> 16)) ^ ((uint16_t)TC_H_MIN(handle))) & SCH_SHM_TALBE_MASK; +} + +static struct sch_shm_obj* __sch_shm_lookup(portid_t pid, tc_handle_t handle) +{ + struct sch_shm_obj *obj; + int hash = sch_shm_hash(pid, handle); + + list_for_each_entry(obj, &sch_shm_table[hash].bucket, list) { + if (obj->portid == pid && obj->handle == handle) + return obj; + } + + return NULL; +} + +void *qsch_shm_get_or_create(struct Qsch *sch, uint32_t len) +{ + struct sch_shm_obj *obj = NULL; + portid_t pid = sch->tc->dev->id; + tc_handle_t handle = sch->handle; + int hash = sch_shm_hash(pid, handle); + + rte_spinlock_lock(&sch_shm_table[hash].lock); + obj = __sch_shm_lookup(pid, handle); + 
if (obj) { + assert(obj->len == len); + obj->refcnt++; + goto done; + } + + obj = rte_zmalloc("qsch_shm_obj", sizeof(struct sch_shm_obj) + len, RTE_CACHE_LINE_SIZE); + if (!obj) + goto done; + + obj->portid = pid; + obj->handle = handle; + obj->len = len; + obj->refcnt = 1; + + list_add_tail(&obj->list, &sch_shm_table[hash].bucket); + +done: + rte_spinlock_unlock(&sch_shm_table[hash].lock); + if (obj) + return (void *)obj->data; + return NULL; +} + +int qsch_shm_put_or_destroy(struct Qsch *sch) +{ + int err = EDPVS_OK; + struct sch_shm_obj *obj = NULL; + portid_t pid = sch->tc->dev->id; + tc_handle_t handle = sch->handle; + int hash = sch_shm_hash(pid, handle); + + rte_spinlock_lock(&sch_shm_table[hash].lock); + obj = __sch_shm_lookup(pid, handle); + if (!obj) { + err = EDPVS_NOTEXIST; + goto done; + } + + if (--obj->refcnt == 0) { + list_del(&obj->list); + rte_free(obj); + } + +done: + rte_spinlock_unlock(&sch_shm_table[hash].lock); + return err; +} + +int qsch_shm_init(void) +{ + int i; + + sch_shm_table = rte_zmalloc("sch_shm_table", + SCH_SHM_TABLE_SIZE * sizeof(struct shm_hash_node), RTE_CACHE_LINE_SIZE); + if (!sch_shm_table) + return EDPVS_NOMEM; + + for (i = 0; i < SCH_SHM_TABLE_SIZE; i++) { + INIT_LIST_HEAD(&sch_shm_table[i].bucket); + rte_spinlock_init(&sch_shm_table[i].lock); + } + return EDPVS_OK; +} + +int qsch_shm_term(void) +{ + int i; + struct sch_shm_obj *obj, *next; + + for (i = 0; i < SCH_SHM_TABLE_SIZE; i++) { + rte_spinlock_lock(&sch_shm_table[i].lock); + list_for_each_entry_safe(obj, next, &sch_shm_table[i].bucket, list) { + if (--obj->refcnt == 0) { + list_del(&obj->list); + rte_free(obj); + } + // FIXME: free the obj whose refcnt != 0 + } + rte_spinlock_unlock(&sch_shm_table[i].lock); + } + + rte_free(sch_shm_table); + + return EDPVS_OK; +} diff --git a/src/tc/sch_tbf.c b/src/tc/sch_tbf.c index b4bd28073..4209790ea 100644 --- a/src/tc/sch_tbf.c +++ b/src/tc/sch_tbf.c @@ -31,6 +31,13 @@ extern struct Qsch_ops bfifo_sch_ops; extern struct 
Qsch_ops tbf_sch_ops; +struct tbf_sch_shared { + int64_t tokens; /* current tokens, in time */ + int64_t ptokens; /* current peak tokens, in time */ + int64_t t_c; /* Time check-point */ + rte_spinlock_t lock; +}; + struct tbf_sch_priv { /* parameters */ uint32_t limit; /* Maximal length of backlog: bytes */ @@ -43,10 +50,7 @@ struct tbf_sch_priv { struct qsch_rate peak; /* max burst rate */ /* internal variables */ - int64_t tokens; /* current tokens, in time */ - int64_t ptokens; /* current peak tokens, in time */ - int64_t t_c; /* Time check-point */ - struct Qsch *qsch; /* backlog queue */ + struct tbf_sch_shared *shm; }; static inline bool tbf_peak_present(const struct tbf_sch_priv *priv) @@ -57,29 +61,21 @@ static inline bool tbf_peak_present(const struct tbf_sch_priv *priv) static int tbf_enqueue(struct Qsch *sch, struct rte_mbuf *mbuf) { struct tbf_sch_priv *priv = qsch_priv(sch); - int err; if (unlikely(mbuf->pkt_len > priv->max_size)) { RTE_LOG(WARNING, TC, "%s: packet too big.\n", __func__); return qsch_drop(sch, mbuf); } - assert(priv->qsch); - /* * enqueue is simple: just put into inner backlog queue, * if it's full then drop the packet (by inner queue). 
*/ - err = priv->qsch->ops->enqueue(priv->qsch, mbuf); - if (err != EDPVS_OK) { - sch->this_qstats.drops++; - return err; + if (unlikely(sch->qstats.backlog + mbuf->pkt_len > priv->limit)) { + return qsch_drop(sch, mbuf); } - sch->this_qstats.backlog += mbuf->pkt_len; - sch->this_qstats.qlen++; - sch->this_q.qlen++; - return EDPVS_OK; + return qsch_enqueue_tail(sch, mbuf); } static struct rte_mbuf *tbf_dequeue(struct Qsch *sch) @@ -89,23 +85,30 @@ static struct rte_mbuf *tbf_dequeue(struct Qsch *sch) int64_t now, toks, ptoks; /* need "signed" to compare with 0 */ unsigned int pkt_len; - assert(priv->qsch); - - mbuf = priv->qsch->ops->peek(priv->qsch); + mbuf = qsch_peek_head(sch); if (unlikely(!mbuf)) return NULL; pkt_len = mbuf->pkt_len; + if (!rte_spinlock_trylock(&priv->shm->lock)) + return NULL; // Someone is doing what I want to, let it go. + now = tc_get_ns(); /* "tokens" arrived since last check point, not exceed bucket depth. * note all of them are present in time manner. */ - toks = min_t(int64_t, now - priv->t_c, priv->buffer); + toks = min_t(int64_t, now - priv->shm->t_c, priv->buffer); ptoks = 0; + if (unlikely(toks < 0)) { + rte_spinlock_unlock(&priv->shm->lock); + RTE_LOG(WARNING, TC, "[%d] %s:token producer bug?\n", rte_lcore_id(), __func__); + return NULL; + } + if (tbf_peak_present(priv)) { /* calc peak-tokens with new arrived tokens plus remaining peak-tokens * should not exceed mtu ("minburst") */ - ptoks = toks + priv->ptokens; + ptoks = toks + priv->shm->ptokens; if (ptoks > priv->mtu) ptoks = priv->mtu; /* minus current pkt size to check if ptoks is enough later */ @@ -114,7 +117,7 @@ static struct rte_mbuf *tbf_dequeue(struct Qsch *sch) /* calc tokens with new arrived tokens plus remaining tokens * should not exceed bucket depth ("burst") */ - toks += priv->tokens; + toks += priv->shm->tokens; if (toks > priv->buffer) toks = priv->buffer; /* minus current pkt size to check if toks is enough later */ @@ -124,26 +127,21 @@ static struct 
rte_mbuf *tbf_dequeue(struct Qsch *sch) * current toks/ptoks was subtracted by pkt_len inadvance. * so < zero means not enough and >= 0 means enough. */ if ((toks|ptoks) >= 0) { - mbuf = qsch_dequeue_head(priv->qsch); + mbuf = qsch_dequeue_head(sch); if (unlikely(!mbuf)) return NULL; /* update variables */ - priv->t_c = now; /* only need update time checkpoint when consumed */ - priv->tokens = toks; - priv->ptokens = ptoks; - - sch->this_qstats.backlog -= pkt_len; - sch->this_qstats.qlen--; - sch->this_q.qlen--; - sch->this_bstats.bytes += pkt_len; - sch->this_bstats.packets++; - + priv->shm->t_c = now; /* only need to update time checkpoint when consumed */ + priv->shm->tokens = toks; + priv->shm->ptokens = ptoks; + rte_spinlock_unlock(&priv->shm->lock); return mbuf; } + rte_spinlock_unlock(&priv->shm->lock); /* token not enough */ - sch->this_qstats.overlimits++; + sch->qstats.overlimits++; return NULL; } @@ -155,7 +153,6 @@ static int tbf_change(struct Qsch *sch, const void *arg) uint64_t max_size; uint32_t limit; struct qsch_rate rate = {}, peak = {}; - struct Qsch *child = NULL; /* set new values or used original */ if (qopt->rate.rate) @@ -203,18 +200,6 @@ static int tbf_change(struct Qsch *sch, const void *arg) return EDPVS_INVAL; } - /* set or create inner backlog queue */ - if (priv->qsch) { - fifo_set_limit(priv->qsch, limit); - } else { - child = fifo_create_dflt(sch, &bfifo_sch_ops, limit); - if (!child) - return EDPVS_INVAL; - - priv->qsch = child; - qsch_hash_add(child, true); - } - /* save values to sch */ priv->limit = limit; /* could be zero (no backlog queue, drop if no token) */ priv->max_size = max_size; @@ -222,8 +207,16 @@ static int tbf_change(struct Qsch *sch, const void *arg) priv->mtu = mtu; priv->rate = rate; priv->peak = peak; - priv->tokens = priv->buffer; - priv->ptokens = priv->mtu; + + if (rte_lcore_id() != g_master_lcore_id) + return EDPVS_OK; + + assert(priv->shm); + rte_spinlock_lock(&priv->shm->lock); + priv->shm->t_c = 
tc_get_ns(); + priv->shm->tokens = priv->buffer; + priv->shm->ptokens = priv->mtu; + rte_spinlock_unlock(&priv->shm->lock); return EDPVS_OK; } @@ -236,33 +229,33 @@ static int tbf_init(struct Qsch *sch, const void *arg) if (!qopt) return EDPVS_OK; - priv->t_c = tc_get_ns(); + priv->shm = qsch_shm_get_or_create(sch, sizeof(struct tbf_sch_shared)); + if (!priv->shm) + return EDPVS_NOMEM; + return tbf_change(sch, qopt); } static void tbf_destroy(struct Qsch *sch) { - struct tbf_sch_priv *priv = qsch_priv(sch); - if (priv->qsch) - qsch_destroy(priv->qsch); + qsch_shm_put_or_destroy(sch); } static void tbf_reset(struct Qsch *sch) { - lcoreid_t cid; - struct tbf_sch_priv *priv = qsch_priv(sch); + struct tbf_sch_priv *priv; - qsch_reset(priv->qsch); - for (cid = 0; cid < NELEMS(sch->q); cid++) { - sch->qstats[cid].backlog = 0; - sch->qstats[cid].qlen = 0; - sch->q[cid].qlen = 0; - } + qsch_reset_queue(sch); + + if (rte_lcore_id() != g_master_lcore_id) + return; + priv = qsch_priv(sch); - priv->t_c = tc_get_ns(); - priv->tokens = priv->buffer; - priv->ptokens = priv->mtu; - return; + rte_spinlock_lock(&priv->shm->lock); + priv->shm->t_c = tc_get_ns(); + priv->shm->tokens = priv->buffer; + priv->shm->ptokens = priv->mtu; + rte_spinlock_unlock(&priv->shm->lock); } static int tbf_dump(struct Qsch *sch, void *arg) diff --git a/src/tc/tc.c b/src/tc/tc.c index 2eb6284e8..9bae2cdc6 100644 --- a/src/tc/tc.c +++ b/src/tc/tc.c @@ -28,6 +28,9 @@ #include "tc/sch.h" #include "tc/cls.h" +extern int netif_pktpool_nb_mbuf; +extern int netif_pktpool_mbuf_cache; + extern struct Qsch_ops pfifo_sch_ops; extern struct Qsch_ops bfifo_sch_ops; extern struct Qsch_ops pfifo_fast_ops; @@ -35,21 +38,13 @@ extern struct Qsch_ops tbf_sch_ops; extern struct tc_cls_ops match_cls_ops; static struct list_head qsch_ops_base; -static rte_rwlock_t qsch_ops_lock; -static struct Qsch_ops *default_qsch_ops = &pfifo_fast_ops; - static struct list_head cls_ops_base; -static rte_rwlock_t cls_ops_lock; -/* make 
them configurable only if really needed. */ static int tc_qsch_hash_size = 64; -static int tc_mbuf_pool_size = 8192; /* shared by all Qsch, enough ? */ -static int tc_mbuf_cache_size = 128; static struct rte_mempool *tc_mbuf_pools[DPVS_MAX_SOCKET]; -/* call with qsch_ops_lock */ -static struct Qsch_ops *__qsch_ops_lookup(const char *name) +struct Qsch_ops *tc_qsch_ops_lookup(const char *name) { struct Qsch_ops *ops; @@ -61,60 +56,27 @@ static struct Qsch_ops *__qsch_ops_lookup(const char *name) return NULL; } +/* call on init stage */ int tc_register_qsch(struct Qsch_ops *ops) { - int err = EDPVS_OK; + if (tc_qsch_ops_lookup(ops->name)) + return EDPVS_EXIST; - rte_rwlock_write_lock(&qsch_ops_lock); - if (__qsch_ops_lookup(ops->name)) { - err = EDPVS_EXIST; - } else { - list_add_tail(&ops->list, &qsch_ops_base); - rte_atomic32_set(&ops->refcnt, 1); - } - rte_rwlock_write_unlock(&qsch_ops_lock); - return err; + list_add_tail(&ops->list, &qsch_ops_base); + return EDPVS_OK; } int tc_unregister_qsch(struct Qsch_ops *ops) { - int err = EDPVS_OK; + if (!tc_qsch_ops_lookup(ops->name)) + return EDPVS_NOTEXIST; - rte_rwlock_write_lock(&qsch_ops_lock); - if (rte_atomic32_dec_and_test(&ops->refcnt)) - list_del(&ops->list); - else - err = EDPVS_BUSY; - rte_rwlock_write_unlock(&qsch_ops_lock); + list_del(&ops->list); - return err; -} - -void tc_qsch_ops_get(struct Qsch_ops *ops) -{ - rte_atomic32_inc(&ops->refcnt); -} - -void tc_qsch_ops_put(struct Qsch_ops *ops) -{ - rte_atomic32_dec(&ops->refcnt); -} - -struct Qsch_ops *tc_qsch_ops_lookup(const char *name) -{ - struct Qsch_ops *ops; - - rte_rwlock_read_lock(&qsch_ops_lock); - ops = __qsch_ops_lookup(name); - if (ops) - tc_qsch_ops_get(ops); - rte_rwlock_read_unlock(&qsch_ops_lock); - - return ops; + return EDPVS_OK; } -/* call with cls_ops_lock */ -static struct tc_cls_ops *__cls_ops_lookup(const char *name) +struct tc_cls_ops *tc_cls_ops_lookup(const char *name) { struct tc_cls_ops *ops; @@ -126,51 +88,24 @@ static struct 
tc_cls_ops *__cls_ops_lookup(const char *name) return NULL; } +/* call on init stage */ int tc_register_cls(struct tc_cls_ops *ops) { - int err = EDPVS_OK; + if (tc_cls_ops_lookup(ops->name)) + return EDPVS_EXIST; - rte_rwlock_write_lock(&cls_ops_lock); - if (__cls_ops_lookup(ops->name)) { - err = EDPVS_EXIST; - } else { - list_add_tail(&ops->list, &cls_ops_base); - rte_atomic32_set(&ops->refcnt, 1); - } - rte_rwlock_write_unlock(&cls_ops_lock); - return err; + list_add_tail(&ops->list, &cls_ops_base); + return EDPVS_OK; } int tc_unregister_cls(struct tc_cls_ops *ops) { - int err = EDPVS_OK; - - rte_rwlock_write_lock(&cls_ops_lock); - if (rte_atomic32_dec_and_test(&ops->refcnt)) - list_del(&ops->list); - else - err = EDPVS_BUSY; - rte_rwlock_write_unlock(&cls_ops_lock); - - return err; -} - -struct tc_cls_ops *tc_cls_ops_get(const char *name) -{ - struct tc_cls_ops *ops; + if (!tc_cls_ops_lookup(ops->name)) + return EDPVS_NOTEXIST; - rte_rwlock_read_lock(&cls_ops_lock); - ops = __cls_ops_lookup(name); - if (ops) - rte_atomic32_inc(&ops->refcnt); - rte_rwlock_read_unlock(&cls_ops_lock); + list_del(&ops->list); - return ops; -} - -void tc_cls_ops_put(struct tc_cls_ops *ops) -{ - rte_atomic32_dec(&ops->refcnt); + return EDPVS_OK; } struct rte_mbuf *tc_handle_egress(struct netif_tc *tc, @@ -269,29 +204,58 @@ struct rte_mbuf *tc_handle_egress(struct netif_tc *tc, return NULL; } -int tc_init_dev(struct netif_port *dev) +static int __tc_destroy_dev(struct netif_port *dev, struct netif_tc *tc) { - int hash, size; - struct netif_tc *tc = netif_tc(dev); + struct Qsch *sch; + struct hlist_node *n; + int hash; - memset(tc, 0, sizeof(*tc)); + if (tc->qsch_hash) { + for (hash = 0; hash < tc->qsch_hash_size; hash++) { + hlist_for_each_entry_safe(sch, n, &tc->qsch_hash[hash], hlist) + qsch_destroy(sch); + } - rte_rwlock_init(&tc->lock); + rte_free(tc->qsch_hash); + } - rte_rwlock_write_lock(&tc->lock); + if (tc->qsch) + qsch_destroy(tc->qsch); - tc->dev = dev; - tc->tc_mbuf_pool = 
tc_mbuf_pools[dev->socket]; + if (tc->qsch_ingress) + qsch_destroy(tc->qsch_ingress); - /* egress "root" Qsch, which handle is 0, parent is TC_H_ROOT. */ - tc->qsch = qsch_create_dflt(dev, default_qsch_ops, TC_H_ROOT); - if (!tc->qsch) { - rte_rwlock_write_unlock(&tc->lock); - tc_destroy_dev(dev); - return EDPVS_NOMEM; + tc->qsch_cnt = 0; + + return EDPVS_OK; +} + +int tc_destroy_dev(struct netif_port *dev) +{ + int i, err = EDPVS_OK; + + assert(dev); + + for (i = 0; i < DPVS_MAX_LCORE; i++) { + err = __tc_destroy_dev(dev, &dev->tc[i]); + if (err != EDPVS_OK) { + RTE_LOG(WARNING, TC, "%s: fail to destroy %s's tc[%d]\n", __func__, dev->name, i); + } } - tc->qsch_cnt = 1; + return err; +} + +static inline int __tc_init_dev(struct netif_port *dev, struct netif_tc *tc) +{ + int hash, size; + + memset(tc, 0, sizeof(*tc)); + + tc->dev = dev; + tc->tc_mbuf_pool = tc_mbuf_pools[dev->socket]; + + tc->qsch_cnt = 0; tc->qsch_ingress = NULL; tc->qsch_hash_size = tc_qsch_hash_size; @@ -299,66 +263,55 @@ int tc_init_dev(struct netif_port *dev) tc->qsch_hash = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE); if (!tc->qsch_hash) { - rte_rwlock_write_unlock(&tc->lock); - tc_destroy_dev(dev); + __tc_destroy_dev(dev, tc); return EDPVS_NOMEM; } for (hash = 0; hash < tc->qsch_hash_size; hash++) INIT_HLIST_HEAD(&tc->qsch_hash[hash]); - rte_rwlock_write_unlock(&tc->lock); return EDPVS_OK; } -int tc_destroy_dev(struct netif_port *dev) +int tc_init_dev(struct netif_port *dev) { - struct netif_tc *tc = netif_tc(dev); - struct Qsch *sch; - struct hlist_node *n; - int hash; + int i, err = EDPVS_OK; - rte_rwlock_write_lock(&tc->lock); + assert(dev); - if (tc->qsch_hash) { - for (hash = 0; hash < tc->qsch_hash_size; hash++) { - hlist_for_each_entry_safe(sch, n, &tc->qsch_hash[hash], hlist) - qsch_destroy(sch); - } - - rte_free(tc->qsch_hash); + for (i = 0; i < DPVS_MAX_LCORE; i++) { + err = __tc_init_dev(dev, &dev->tc[i]); + if (err != EDPVS_OK) + break; } - if (tc->qsch) - qsch_destroy(tc->qsch); 
- - if (tc->qsch_ingress) - qsch_destroy(tc->qsch_ingress); - - tc->qsch_cnt = 0; - - rte_rwlock_write_unlock(&tc->lock); + if (err != EDPVS_OK) { + for (--i; i >= 0; i--) { + __tc_destroy_dev(dev, &dev->tc[i]); + } + } - return EDPVS_OK; + return err; } int tc_init(void) { - int s; + int s, err; + int tc_mbuf_pool_size = netif_pktpool_nb_mbuf; + int tc_mbuf_cache_size = netif_pktpool_mbuf_cache; + + if ((err = qsch_shm_init()) != EDPVS_OK) + return err; /* scheduler */ - rte_rwlock_init(&qsch_ops_lock); INIT_LIST_HEAD(&qsch_ops_base); - tc_register_qsch(&pfifo_sch_ops); tc_register_qsch(&bfifo_sch_ops); tc_register_qsch(&pfifo_fast_ops); tc_register_qsch(&tbf_sch_ops); /* classifier */ - rte_rwlock_init(&cls_ops_lock); INIT_LIST_HEAD(&cls_ops_base); - tc_register_cls(&match_cls_ops); /* per-NUMA socket mempools for queued tc_mbuf{} */ @@ -379,3 +332,25 @@ int tc_init(void) return EDPVS_OK; } + +int tc_term(void) +{ + int s; + + tc_unregister_qsch(&pfifo_sch_ops); + tc_unregister_qsch(&bfifo_sch_ops); + tc_unregister_qsch(&pfifo_fast_ops); + tc_unregister_qsch(&tbf_sch_ops); + + tc_unregister_cls(&match_cls_ops); + + for (s = 0; s < get_numa_nodes(); s++) { + if (tc_mbuf_pools[s]) { + rte_mempool_free(tc_mbuf_pools[s]); + } + } + + qsch_shm_term(); + + return EDPVS_OK; +} diff --git a/src/tc/tc_ctrl.c b/src/tc/tc_ctrl.c index 9ca98ca87..ef477cd88 100644 --- a/src/tc/tc_ctrl.c +++ b/src/tc/tc_ctrl.c @@ -28,54 +28,37 @@ #include "tc/sch.h" #include "conf/tc.h" +struct tc_msg_param { + struct netif_port *dev; + sockoptid_t operator; + union tc_param param; +} __attribute__((__packed__)); + +static uint32_t tc_msg_seq(void) +{ + static uint32_t counter = 0; + return counter++; +} + static int fill_qsch_param(struct Qsch *sch, struct tc_qsch_param *pr) { int err; - struct dpvs_msg *req, *reply; - struct dpvs_multicast_queue *replies = NULL; - struct tc_qsch_stats *st; memset(pr, 0, sizeof(*pr)); + pr->cid = rte_lcore_id(); pr->handle = sch->handle; - pr->where = 
sch->parent; + pr->where = sch->parent; snprintf(pr->kind, sizeof(pr->kind), "%s", sch->ops->name); if (sch->ops->dump && (err = sch->ops->dump(sch, &pr->qopt)) != EDPVS_OK) return err; - /* send msg to workers for per-cpu stats */ - req = msg_make(MSG_TYPE_TC_STATS, 0, DPVS_MSG_MULTICAST, - rte_lcore_id(), sizeof(struct Qsch *), &sch); - if (!req) - return EDPVS_NOMEM; - - err = multicast_msg_send(req, 0, &replies); - if (err != EDPVS_OK) { - RTE_LOG(ERR, TC, "%s: send msg: %s\n", __func__, dpvs_strerror(err)); - msg_destroy(&req); - return err; - } - - /* handle replies */ - list_for_each_entry(reply, &replies->mq, mq_node) { - st = (struct tc_qsch_stats *)reply->data; - assert(st && reply->cid < DPVS_MAX_LCORE); - - pr->qstats_cpus[reply->cid] = st->qstats; - pr->bstats_cpus[reply->cid] = st->bstats; - - pr->qstats.qlen += st->qstats.qlen; - pr->qstats.backlog += st->qstats.backlog; - pr->qstats.drops += st->qstats.drops; - pr->qstats.requeues += st->qstats.requeues; - pr->qstats.overlimits += st->qstats.overlimits; + pr->cls_cnt = sch->cls_cnt; + pr->flags = sch->flags; + pr->qstats = sch->qstats; + pr->bstats = sch->bstats; - pr->bstats.bytes += st->bstats.bytes; - pr->bstats.packets += st->bstats.packets; - } - - msg_destroy(&req); return EDPVS_OK; } @@ -85,8 +68,9 @@ static int fill_cls_param(struct tc_cls *cls, struct tc_cls_param *pr) memset(pr, 0, sizeof(*pr)); - pr->handle = cls->handle; + pr->cid = rte_lcore_id(); pr->sch_id = cls->sch->handle; + pr->handle = cls->handle; snprintf(pr->kind, sizeof(pr->kind), "%s", cls->ops->name); pr->pkt_type = cls->pkt_type; pr->priority = cls->prio; @@ -97,31 +81,33 @@ static int fill_cls_param(struct tc_cls *cls, struct tc_cls_param *pr) return EDPVS_OK; } -/* with tc->lock */ -static int __tc_so_qsch_set(struct netif_tc *tc, sockoptid_t oper, +static int tc_local_qsch_set(struct netif_port *dev, sockoptid_t oper, const struct tc_qsch_param *qpar) { + struct netif_tc *tc; struct Qsch *sch = NULL; tc_handle_t where; 
int err; + if (!netif_port_get(dev->id)) + return EDPVS_NODEV; + tc = netif_tc(dev); + if (oper == SOCKOPT_TC_DEL || oper == SOCKOPT_TC_CHANGE || oper == SOCKOPT_TC_REPLACE) { sch = qsch_lookup_noref(tc, qpar->handle); if (!sch) return EDPVS_NOTEXIST; - - /* egress root is readonly */ - if (sch == tc->qsch) - return EDPVS_NOTSUPP; } switch (oper) { case SOCKOPT_TC_ADD: - qsch_create(tc->dev, qpar->kind, qpar->where, qpar->handle, - &qpar->qopt, &err); + assert(qpar->handle != 0); + qsch_create(tc->dev, qpar->kind, qpar->where, + qpar->handle, &qpar->qopt, &err); return err; + case SOCKOPT_TC_DEL: qsch_destroy(sch); return EDPVS_OK; @@ -130,41 +116,47 @@ static int __tc_so_qsch_set(struct netif_tc *tc, sockoptid_t oper, return qsch_change(sch, &qpar->qopt); case SOCKOPT_TC_REPLACE: + assert(qpar->handle != 0); /* keep parent unchanged if not indicated */ if (qpar->where == TC_H_UNSPEC) where = sch->parent; else where = qpar->where; - qsch_destroy(sch); - qsch_create(tc->dev, qpar->kind, where, qpar->handle, - &qpar->qopt, &err); + qsch_create(tc->dev, qpar->kind, where, + qpar->handle, &qpar->qopt, &err); return err; default: return EDPVS_NOTSUPP; } + + return EDPVS_OK; } -/* with tc->lock */ -static int __tc_so_qsch_get(struct netif_tc *tc, sockoptid_t oper, +static int tc_local_qsch_get(struct netif_port *dev, sockoptid_t oper, const struct tc_qsch_param *qpar, - union tc_param **arr, int *narr) + union tc_param **arr, uint32_t *narr) { - int nparam, off, h, err; - union tc_param *params = NULL; + struct netif_tc *tc; struct Qsch *sch = NULL; + union tc_param *params = NULL; + int nparam, off, h, err; if (oper != SOCKOPT_TC_SHOW) return EDPVS_INVAL; + if (!netif_port_get(dev->id)) + return EDPVS_NODEV; + tc = netif_tc(dev); + if (qpar->handle != TC_H_UNSPEC) { sch = qsch_lookup_noref(tc, qpar->handle); if (!sch) return EDPVS_NOTEXIST; nparam = 1; - params = rte_malloc(NULL, sizeof(*params), 0); + params = msg_reply_alloc(nparam * sizeof(*params)); /* msg may like 
it */ if (!params) return EDPVS_NOMEM; @@ -173,8 +165,12 @@ static int __tc_so_qsch_get(struct netif_tc *tc, sockoptid_t oper, goto errout; } else { /* get all Qsch */ nparam = tc->qsch_cnt; + if (!nparam) { + err = EDPVS_OK; + goto errout; + } - params = rte_zmalloc(NULL, nparam * sizeof(*params), 0); + params = msg_reply_alloc(nparam * sizeof(*params)); /* msg may like it */ if (!params) { err = EDPVS_NOMEM; goto errout; @@ -210,23 +206,28 @@ static int __tc_so_qsch_get(struct netif_tc *tc, sockoptid_t oper, *arr = params; *narr = nparam; - return EDPVS_OK; errout: if (params) - rte_free(params); + msg_reply_free(params); + *arr = NULL; + *narr = 0; return err; } -/* with tc->lock */ -static int __tc_so_cls_set(struct netif_tc *tc, sockoptid_t oper, +static int tc_local_cls_set(struct netif_port *dev, sockoptid_t oper, const struct tc_cls_param *cpar) { + struct netif_tc *tc; struct Qsch *sch; struct tc_cls *cls = NULL; int err; + if (!netif_port_get(dev->id)) + return EDPVS_NODEV; + tc = netif_tc(dev); + sch = qsch_lookup_noref(tc, cpar->sch_id); if (!sch) return EDPVS_NOTEXIST; @@ -241,6 +242,7 @@ static int __tc_so_cls_set(struct netif_tc *tc, sockoptid_t oper, switch (oper) { case SOCKOPT_TC_ADD: + assert(cpar->handle != 0); tc_cls_create(sch, cpar->kind, cpar->handle, cpar->pkt_type, cpar->priority, &cpar->copt, &err); return err; @@ -253,6 +255,7 @@ static int __tc_so_cls_set(struct netif_tc *tc, sockoptid_t oper, return tc_cls_change(cls, &cpar->copt); case SOCKOPT_TC_REPLACE: + assert(cpar->handle != 0); tc_cls_destroy(cls); tc_cls_create(sch, cpar->kind, cpar->handle, cpar->pkt_type, cpar->priority, &cpar->copt, &err); @@ -261,21 +264,27 @@ static int __tc_so_cls_set(struct netif_tc *tc, sockoptid_t oper, default: return EDPVS_NOTSUPP; } + + return EDPVS_OK; } -/* with tc->lock */ -static int __tc_so_cls_get(struct netif_tc *tc, sockoptid_t oper, +static int tc_local_cls_get(struct netif_port *dev, sockoptid_t oper, const struct tc_cls_param *cpar, - 
union tc_param **arr, int *narr) + union tc_param **arr, uint32_t *narr) { + struct netif_tc *tc; struct Qsch *sch; struct tc_cls *cls; int err, nparam, off; - union tc_param *params; + union tc_param *params = NULL; if (oper != SOCKOPT_TC_SHOW) return EDPVS_INVAL; + if (!netif_port_get(dev->id)) + return EDPVS_NODEV; + tc = netif_tc(dev); + sch = qsch_lookup_noref(tc, cpar->sch_id); if (!sch) return EDPVS_NOTEXIST; @@ -286,7 +295,7 @@ static int __tc_so_cls_get(struct netif_tc *tc, sockoptid_t oper, return EDPVS_NOTEXIST; nparam = 1; - params = rte_malloc(NULL, sizeof(*params), 0); + params = msg_reply_alloc(nparam * sizeof(*params)); /* msg may like it */ if (!params) return EDPVS_NOMEM; @@ -295,8 +304,12 @@ static int __tc_so_cls_get(struct netif_tc *tc, sockoptid_t oper, goto errout; } else { nparam = sch->cls_cnt; + if (!nparam) { + err = EDPVS_OK; + goto errout; + } - params = rte_malloc(NULL, nparam * sizeof(*params), 0); + params = msg_reply_alloc(nparam * sizeof(*params)); /* msg may like it */ if (!params) { err = EDPVS_NOMEM; goto errout; @@ -312,12 +325,320 @@ static int __tc_so_cls_get(struct netif_tc *tc, sockoptid_t oper, *arr = params; *narr = nparam; - return EDPVS_OK; errout: if (params) - rte_free(params); + msg_reply_free(params); + *arr = NULL; + *narr = 0; + return err; +} + +static int tc_qsch_set_cb(struct dpvs_msg *msg) +{ + struct tc_msg_param *mpar; + + if (msg->len != sizeof(struct tc_msg_param)) + return EDPVS_INVAL; + mpar = (struct tc_msg_param *)msg->data; + + return tc_local_qsch_set(mpar->dev, mpar->operator, &mpar->param.qsch); +} + +static int tc_qsch_get_cb(struct dpvs_msg *msg) +{ + int err; + uint32_t narr; + union tc_param *arr; + struct tc_msg_param *mpar; + + if (msg->len != sizeof(struct tc_msg_param)) + return EDPVS_INVAL; + mpar = (struct tc_msg_param *)msg->data; + + err = tc_local_qsch_get(mpar->dev, mpar->operator, &mpar->param.qsch, &arr, &narr); + if (err != EDPVS_OK) + return err; + + msg->reply.data = arr; + 
msg->reply.len= narr * sizeof(*arr); + return EDPVS_OK; +} + +static int tc_cls_set_cb(struct dpvs_msg *msg) +{ + struct tc_msg_param *mpar; + + if (msg->len != sizeof(struct tc_msg_param)) + return EDPVS_INVAL; + mpar = (struct tc_msg_param *)msg->data; + + return tc_local_cls_set(mpar->dev, mpar->operator, &mpar->param.cls); +} + +static int tc_cls_get_cb(struct dpvs_msg *msg) +{ + int err; + uint32_t narr; + union tc_param *arr; + struct tc_msg_param *mpar; + + if (msg->len != sizeof(struct tc_msg_param)) + return EDPVS_INVAL; + mpar = (struct tc_msg_param *)msg->data; + + err = tc_local_cls_get(mpar->dev, mpar->operator, &mpar->param.cls, &arr, &narr); + if (err != EDPVS_OK) + return err; + + msg->reply.data = arr; + msg->reply.len = narr * sizeof(*arr); + return EDPVS_OK; +} + +static int tc_so_qsch_set(struct netif_port *dev, sockoptid_t oper, + const struct tc_qsch_param *qpar) +{ + int err; + struct dpvs_msg *msg; + struct tc_msg_param mpar; + struct tc_qsch_param param = *qpar; + + if (oper == SOCKOPT_TC_ADD || oper == SOCKOPT_TC_REPLACE) { + if (!param.handle) { + param.handle = sch_alloc_handle(netif_tc(dev)); + if (unlikely(!param.handle)) + return EDPVS_RESOURCE; + } + } + + /* set master lcore */ + err = tc_local_qsch_set(dev, oper, ¶m); + if (err != EDPVS_OK) + return err; + + /* set slave lcores */ + mpar.dev = dev; + mpar.operator = oper; + mpar.param.qsch = param; + + msg = msg_make(MSG_TYPE_TC_QSCH_SET, tc_msg_seq(), DPVS_MSG_MULTICAST, + rte_lcore_id(), sizeof(mpar), &mpar); + if (unlikely(!msg)) + return EDPVS_NOMEM; + + err = multicast_msg_send(msg, DPVS_MSG_F_ASYNC, NULL); + if (err != EDPVS_OK) { + msg_destroy(&msg); + return err; + } + + msg_destroy(&msg); + return EDPVS_OK; +} + +static int tc_so_qsch_get(struct netif_port *dev, + sockoptid_t oper, uint32_t flags, + const struct tc_qsch_param *qpar, + union tc_param **arr, uint32_t *narr) +{ + int err, i, off = 0; + struct dpvs_msg *msg, *_msg; + struct dpvs_multicast_queue *reply; + 
struct tc_msg_param mpar; + + union tc_param *entries, *_arr; + uint32_t nentry, _narr, nqsch; + + err = tc_local_qsch_get(dev, oper, qpar, &_arr, &_narr); + if (err != EDPVS_OK || !_narr) + return err; + + nqsch = nentry = _narr; + if (flags & TC_F_OPS_VERBOSE) + nentry += g_slave_lcore_num * _narr; + entries = rte_zmalloc("tc_qsch_get", nentry * sizeof(*entries), RTE_CACHE_LINE_SIZE); + if (unlikely(!entries)) { + msg_reply_free(_arr); + err = EDPVS_NOMEM; + goto errout; + } + + for (i = 0; i < _narr; i++) + entries[off++] = _arr[i]; + msg_reply_free(_arr); + + if (flags & (TC_F_OPS_STATS|TC_F_OPS_VERBOSE)) { + mpar.dev = dev; + mpar.operator = oper; + mpar.param.qsch = *qpar; + + msg = msg_make(MSG_TYPE_TC_QSCH_GET, tc_msg_seq(), DPVS_MSG_MULTICAST, + rte_lcore_id(), sizeof(mpar), &mpar); + if (unlikely(!msg)) + goto errout; + err = multicast_msg_send(msg, 0, &reply); + if (err != EDPVS_OK) { + msg_destroy(&msg); + goto errout; + } + list_for_each_entry(_msg, &reply->mq, mq_node) { + _arr = (union tc_param *)_msg->data; + _narr = _msg->len/sizeof(*_arr); + if (unlikely(_narr != nqsch)) { + RTE_LOG(WARNING, TC, "%s: tc qsch number does not match -- master=%d, slave[%d]=%d\n", + __func__, nqsch, _msg->cid, _narr); + msg_destroy(&msg); + err = EDPVS_INVAL; + goto errout; + } + for (i = 0; i < _narr; i++) { + if (flags & TC_F_OPS_VERBOSE) + entries[off++] = _arr[i]; + if (flags & TC_F_OPS_STATS) { + entries[i].qsch.qstats.qlen += _arr[i].qsch.qstats.qlen; + entries[i].qsch.qstats.backlog += _arr[i].qsch.qstats.backlog; + entries[i].qsch.qstats.drops += _arr[i].qsch.qstats.drops; + entries[i].qsch.qstats.requeues += _arr[i].qsch.qstats.requeues; + entries[i].qsch.qstats.overlimits += _arr[i].qsch.qstats.overlimits; + entries[i].qsch.bstats.bytes += _arr[i].qsch.bstats.bytes; + entries[i].qsch.bstats.packets += _arr[i].qsch.bstats.packets; + } + } + } + msg_destroy(&msg); + } + + assert(off <= nentry); + *narr = nentry; + *arr = entries; + return EDPVS_OK; + +errout: 
+ if (entries) + rte_free(entries); + *narr = 0; + *arr = NULL; + return err; +} + +static int tc_so_cls_set(struct netif_port *dev, sockoptid_t oper, + const struct tc_cls_param *cpar) +{ + int err; + struct dpvs_msg *msg; + struct tc_msg_param mpar; + struct tc_cls_param param = *cpar; + + if (oper == SOCKOPT_TC_ADD || oper == SOCKOPT_TC_REPLACE) { + if (!param.handle) { + struct Qsch *sch = qsch_lookup_noref(netif_tc(dev), cpar->sch_id); + if (!sch) + return EDPVS_NOTEXIST; + param.handle = cls_alloc_handle(sch); + if (unlikely(!param.handle)) + return EDPVS_RESOURCE; + } + } + + /* set master lcores */ + err = tc_local_cls_set(dev, oper, ¶m); + if (err != EDPVS_OK) + return err; + + /* set slave lcore */ + mpar.dev = dev; + mpar.operator = oper; + mpar.param.cls = param; + + msg = msg_make(MSG_TYPE_TC_CLS_SET, tc_msg_seq(), DPVS_MSG_MULTICAST, + rte_lcore_id(), sizeof(mpar), &mpar); + if (unlikely(!msg)) + return EDPVS_NOMEM; + + err = multicast_msg_send(msg, DPVS_MSG_F_ASYNC, NULL); + if (err != EDPVS_OK) { + msg_destroy(&msg); + return err; + } + + msg_destroy(&msg); + return EDPVS_OK; +} + +static int tc_so_cls_get(struct netif_port *dev, + sockoptid_t oper, uint32_t flags, + const struct tc_cls_param *cpar, + union tc_param **arr, uint32_t *narr) +{ + int err, i, off = 0; + struct dpvs_msg *msg, *_msg; + struct dpvs_multicast_queue *reply; + struct tc_msg_param mpar; + + union tc_param *entries, *_arr; + uint32_t nentry, _narr, ncls; + + err = tc_local_cls_get(dev, oper, cpar, &_arr, &_narr); + if (err != EDPVS_OK || !_narr) + return err; + + ncls = nentry = _narr; + if (flags & TC_F_OPS_VERBOSE) + nentry += g_slave_lcore_num * _narr; + entries = rte_zmalloc("tc_cls_get", nentry * sizeof(*entries), RTE_CACHE_LINE_SIZE); + if (unlikely(!entries)) { + msg_reply_free(_arr); + err = EDPVS_NOMEM; + goto errout; + } + + for (i = 0; i < _narr; i++) + entries[off++] = _arr[i]; + msg_reply_free(_arr); + + if (flags & TC_F_OPS_VERBOSE) { + mpar.dev = dev; + 
mpar.operator = oper; + mpar.param.cls = *cpar; + + msg = msg_make(MSG_TYPE_TC_CLS_GET, tc_msg_seq(), DPVS_MSG_MULTICAST, + rte_lcore_id(), sizeof(mpar), &mpar); + if (unlikely(!msg)) + goto errout; + err = multicast_msg_send(msg, 0, &reply); + if (err != EDPVS_OK) { + msg_destroy(&msg); + goto errout; + } + list_for_each_entry(_msg, &reply->mq, mq_node) { + _arr = (union tc_param *)_msg->data; + _narr = _msg->len/sizeof(*_arr); + if (unlikely(_narr != ncls)) { + RTE_LOG(WARNING, TC, "%s: tc cls number does not match -- master=%d, slave[%d]=%d\n", + __func__, ncls, _msg->cid, _narr); + msg_destroy(&msg); + err = EDPVS_INVAL; + goto errout; + } + for (i = 0; i < _narr; i++) { + entries[off++] = _arr[i]; + } + } + msg_destroy(&msg); + } + + assert(off <= nentry); + *narr = nentry; + *arr = entries; + return EDPVS_OK; + +errout: + if (entries) + rte_free(entries); + *narr = 0; + *arr = NULL; return err; } @@ -325,7 +646,6 @@ static int tc_sockopt_set(sockoptid_t opt, const void *conf, size_t size) { const struct tc_conf *cf = conf; int err = EDPVS_INVAL; - struct netif_tc *tc; struct netif_port *dev; if (!conf || size < sizeof(*cf)) @@ -334,21 +654,18 @@ static int tc_sockopt_set(sockoptid_t opt, const void *conf, size_t size) dev = netif_port_get_by_name(cf->ifname); if (!dev) return EDPVS_NODEV; - tc = netif_tc(dev); - rte_rwlock_write_lock(&tc->lock); switch (cf->obj) { case TC_OBJ_QSCH: - err = __tc_so_qsch_set(tc, opt, &cf->param.qsch); + err = tc_so_qsch_set(dev, opt, &cf->param.qsch); break; case TC_OBJ_CLS: - err = __tc_so_cls_set(tc, opt, &cf->param.cls); + err = tc_so_cls_set(dev, opt, &cf->param.cls); break; default: err = EDPVS_NOTSUPP; break; } - rte_rwlock_write_unlock(&tc->lock); return err; } @@ -356,10 +673,10 @@ static int tc_sockopt_get(sockoptid_t opt, const void *conf, size_t size, void **out, size_t *outsize) { const struct tc_conf *cf = conf; - struct netif_tc *tc; struct netif_port *dev; union tc_param *param_arr = NULL; - int nparam = 0, err; 
+ uint32_t nparam = 0; + int err; if (!conf || size < sizeof(*cf) || !out || !outsize) return EDPVS_INVAL; @@ -367,16 +684,14 @@ static int tc_sockopt_get(sockoptid_t opt, const void *conf, size_t size, dev = netif_port_get_by_name(cf->ifname); if (!dev) return EDPVS_NODEV; - tc = netif_tc(dev); - rte_rwlock_read_lock(&tc->lock); switch (cf->obj) { case TC_OBJ_QSCH: - err = __tc_so_qsch_get(tc, opt, &cf->param.qsch, + err = tc_so_qsch_get(dev, opt, cf->op_flags, &cf->param.qsch, ¶m_arr, &nparam); break; case TC_OBJ_CLS: - err = __tc_so_cls_get(tc, opt, &cf->param.cls, + err = tc_so_cls_get(dev, opt, cf->op_flags, &cf->param.cls, ¶m_arr, &nparam); break; default: @@ -389,7 +704,6 @@ static int tc_sockopt_get(sockoptid_t opt, const void *conf, size_t size, *outsize = nparam * sizeof(union tc_param); } - rte_rwlock_read_unlock(&tc->lock); return err; } @@ -403,49 +717,71 @@ static struct dpvs_sockopts tc_sockopts = { .get = tc_sockopt_get, }; -static int tc_msg_get_stats(struct dpvs_msg *msg) -{ - void *ptr; - struct Qsch *qsch; - struct tc_qsch_stats *st; - - assert(msg && msg->len == sizeof(struct Qsch *)); - - ptr = msg->data; - qsch = *(struct Qsch **)ptr; - - st = msg_reply_alloc(sizeof(*st)); - if (!st) - return EDPVS_NOMEM; - - st->qstats = qsch->this_qstats; - st->bstats = qsch->this_bstats; - - msg->reply.len = sizeof(*st); - msg->reply.data = st; - - return EDPVS_OK; -} - -static struct dpvs_msg_type tc_stats_msg = { - .type = MSG_TYPE_TC_STATS, - .prio = MSG_PRIO_LOW, - .unicast_msg_cb = tc_msg_get_stats, +static struct dpvs_msg_type tc_msg_types[] = { + { + .type = MSG_TYPE_TC_QSCH_GET, + .prio = MSG_PRIO_LOW, + .mode = DPVS_MSG_MULTICAST, + .unicast_msg_cb = tc_qsch_get_cb, + }, + { + .type = MSG_TYPE_TC_QSCH_SET, + .prio = MSG_PRIO_NORM, + .mode = DPVS_MSG_MULTICAST, + .unicast_msg_cb = tc_qsch_set_cb, + }, + { + .type = MSG_TYPE_TC_CLS_GET, + .prio = MSG_PRIO_LOW, + .mode = DPVS_MSG_MULTICAST, + .unicast_msg_cb = tc_cls_get_cb, + }, + { + .type = 
MSG_TYPE_TC_CLS_SET, + .prio = MSG_PRIO_LOW, + .mode = DPVS_MSG_MULTICAST, + .unicast_msg_cb = tc_cls_set_cb, + }, }; int tc_ctrl_init(void) { - int err; + int i, err; err = sockopt_register(&tc_sockopts); if (err != EDPVS_OK) return err; - err = msg_type_mc_register(&tc_stats_msg); + for (i = 0; i < NELEMS(tc_msg_types); i++) { + err = msg_type_mc_register(&tc_msg_types[i]); + if (err != EDPVS_OK) + break; + } if (err != EDPVS_OK) { + for (--i; i >=0; i--) + msg_type_mc_unregister(&tc_msg_types[i]); sockopt_unregister(&tc_sockopts); return err; } return EDPVS_OK; } + +int tc_ctrl_term(void) +{ + int i, err; + + for (i = 0; i < NELEMS(tc_msg_types); i++) { + err = msg_type_mc_unregister(&tc_msg_types[i]); + if (err != EDPVS_OK) + RTE_LOG(ERR, TC, "%s: fail to unregister tc_msg_types[%d]\n", __func__, i); + } + + err = sockopt_unregister(&tc_sockopts); + if (err != EDPVS_OK) { + RTE_LOG(ERR, TC, "%s: fail to unregister tc_sockopts\n", __func__); + return err; + } + + return EDPVS_OK; +} diff --git a/tools/dpip/cls.c b/tools/dpip/cls.c index 1756a182c..7fbce1b8f 100644 --- a/tools/dpip/cls.c +++ b/tools/dpip/cls.c @@ -39,7 +39,7 @@ static void cls_help(void) " [ CLS_TYPE [ COPTIONS ] ]\n" "\n" "Parameters:\n" - " PKTTYPE := { ipv4 | vlan }\n" + " PKTTYPE := { ipv4 | ipv6 | vlan }\n" " CLS_TYPE := { match }\n" " COPTIONS := { MATCH_OPTS }\n" " PRIO := NUMBER\n" @@ -67,12 +67,16 @@ static void cls_help(void) ); } -static void cls_dump_param(const char *ifname, const union tc_param *param) +static void cls_dump_param(const char *ifname, const union tc_param *param, + bool stats, bool verbose) { const struct tc_cls_param *cls = ¶m->cls; char handle[16], sch_id[16]; - printf("cls %s %s dev %s %s pkttype 0x%04x prio %d ", + if (verbose) + printf("[%02d] ", cls->cid); + + printf("cls %s %s dev %s qsch %s pkttype 0x%04x prio %d ", cls->kind, tc_handle_itoa(cls->handle, handle, sizeof(handle)), ifname, tc_handle_itoa(cls->sch_id, sch_id, sizeof(sch_id)), cls->pkt_type, 
cls->priority); @@ -104,7 +108,7 @@ static int cls_parse(struct dpip_obj *obj, struct dpip_conf *cf) /* default values */ param->pkt_type = ETH_P_IP; param->handle = TC_H_UNSPEC; - param->sch_id = TC_H_ROOT; /* invalid qsch handle */ + param->sch_id = TC_H_ROOT; param->priority = 0; while (cf->argc > 0) { @@ -121,6 +125,8 @@ static int cls_parse(struct dpip_obj *obj, struct dpip_conf *cf) NEXTARG_CHECK(cf, CURRARG(cf)); if (strcasecmp(CURRARG(cf), "ipv4") == 0) param->pkt_type = ETH_P_IP; + else if (strcasecmp(CURRARG(cf), "ipv6") == 0) + param->pkt_type = ETH_P_IPV6; else if (strcasecmp(CURRARG(cf), "vlan") == 0) param->pkt_type = ETH_P_8021Q; else { @@ -151,7 +157,7 @@ static int cls_parse(struct dpip_obj *obj, struct dpip_conf *cf) m->result.sch_id = tc_handle_atoi(CURRARG(cf)); } } else { - fprintf(stderr, "invalid/miss cls type: `%s'\n", param->kind); + fprintf(stderr, "invalid/miss cls type: '%s'\n", param->kind); return EDPVS_INVAL; } } @@ -235,6 +241,12 @@ static int cls_do_cmd(struct dpip_obj *obj, dpip_cmd_t cmd, int err, i; size_t size; + if (conf->stats) + tc_conf->op_flags |= TC_F_OPS_STATS; + + if (conf->verbose) + tc_conf->op_flags |= TC_F_OPS_VERBOSE; + switch (cmd) { case DPIP_CMD_ADD: return dpvs_setsockopt(SOCKOPT_TC_ADD, tc_conf, @@ -261,7 +273,7 @@ static int cls_do_cmd(struct dpip_obj *obj, dpip_cmd_t cmd, } for (i = 0; i < size / sizeof(*params); i++) - cls_dump_param(tc_conf->ifname, ¶ms[i]); + cls_dump_param(tc_conf->ifname, ¶ms[i], conf->stats, conf->verbose); dpvs_sockopt_msg_free(params); return EDPVS_OK; diff --git a/tools/dpip/ipset.c b/tools/dpip/ipset.c old mode 100755 new mode 100644 diff --git a/tools/dpip/qsch.c b/tools/dpip/qsch.c index cb2e8b7c8..acc58064d 100644 --- a/tools/dpip/qsch.c +++ b/tools/dpip/qsch.c @@ -39,7 +39,7 @@ static void qsch_help(void) " [ QSCH_KIND [ QOPTIONS ] ]\n" "\n" "Parameters:\n" - " QSCH_KIND := { [b|p]fifo | tbf }\n" + " QSCH_KIND := { [b|p]fifo | pfifo_fast | tbf }\n" " QOPTIONS := { FIFO_OPTS | 
TBF_OPTS }\n" " FIFO_OPTS := [ limit NUMBER ]\n" " TBF_OPTS := rate RATE burst BYTES { latency MS | limit BYTES }\n" @@ -134,15 +134,17 @@ static int qsch_parse(struct dpip_obj *obj, struct dpip_conf *cf) NEXTARG_CHECK(cf, CURRARG(cf)); param->handle = tc_handle_atoi(CURRARG(cf)); } else if (strcmp(CURRARG(cf), "root") == 0) { - param->where = TC_H_ROOT; + param->handle = TC_H_ROOT; + param->where = TC_H_UNSPEC; } else if (strcmp(CURRARG(cf), "ingress") == 0) { - param->where = TC_H_INGRESS; param->handle = TC_H_INGRESS; + param->where = TC_H_UNSPEC; } else if (strcmp(CURRARG(cf), "parent") == 0) { NEXTARG_CHECK(cf, CURRARG(cf)); param->where = tc_handle_atoi(CURRARG(cf)); } else if (strcmp(CURRARG(cf), "bfifo") == 0 || strcmp(CURRARG(cf), "pfifo") == 0 || + strcmp(CURRARG(cf), "pfifo_fast") == 0 || strcmp(CURRARG(cf), "tbf") == 0) { snprintf(param->kind, TCNAMESIZ, "%s", CURRARG(cf)); } else { /* kind must be set ahead then QOPTIONS */ @@ -151,7 +153,7 @@ static int qsch_parse(struct dpip_obj *obj, struct dpip_conf *cf) NEXTARG_CHECK(cf, CURRARG(cf)); param->qopt.fifo.limit = atoi(CURRARG(cf)); } else { - fprintf(stderr, "invalid option for %s: `%s'\n", + fprintf(stderr, "invalid option for %s: '%s'\n", param->kind, CURRARG(cf)); return EDPVS_INVAL; } @@ -160,7 +162,7 @@ static int qsch_parse(struct dpip_obj *obj, struct dpip_conf *cf) NEXTARG_CHECK(cf, CURRARG(cf)); param->qopt.tbf.rate.rate = rate_atoi(CURRARG(cf)); if (!param->qopt.tbf.rate.rate) { - fprintf(stderr, "invalid rate: `%s'\n", CURRARG(cf)); + fprintf(stderr, "invalid rate: '%s'\n", CURRARG(cf)); return EDPVS_INVAL; } } else if (strcmp(CURRARG(cf), "burst") == 0) { @@ -184,7 +186,7 @@ static int qsch_parse(struct dpip_obj *obj, struct dpip_conf *cf) NEXTARG_CHECK(cf, CURRARG(cf)); param->qopt.tbf.peakrate.rate = rate_atoi(CURRARG(cf)); if (!param->qopt.tbf.peakrate.rate) { - fprintf(stderr, "invalid peakrate: `%s'\n", + fprintf(stderr, "invalid peakrate: '%s'\n", CURRARG(cf)); return EDPVS_INVAL; } 
@@ -192,12 +194,14 @@ static int qsch_parse(struct dpip_obj *obj, struct dpip_conf *cf) NEXTARG_CHECK(cf, CURRARG(cf)); param->qopt.tbf.mtu = atoi(CURRARG(cf)); } else { - fprintf(stderr, "invalid option for %s: `%s'\n", + fprintf(stderr, "invalid option for %s: '%s'\n", param->kind, CURRARG(cf)); return EDPVS_INVAL; } + } else if (strcmp(param->kind, "pfifo_fast") == 0) { + ; // pfifo_fast doesn't have any param } else { - fprintf(stderr, "invalid/miss qsch kind: `%s'\n", param->kind); + fprintf(stderr, "invalid/miss qsch kind: '%s'\n", param->kind); return EDPVS_INVAL; } } @@ -238,6 +242,8 @@ static int qsch_check(const struct dpip_obj *obj, dpip_cmd_t cmd) fprintf(stderr, "missing buffer for tbf.\n"); return EDPVS_INVAL; } + } else if (strcmp(param->kind, "pfifo_fast") == 0) { + ; } else { fprintf(stderr, "invalid qsch kind.\n"); return EDPVS_INVAL; @@ -256,7 +262,7 @@ static int qsch_check(const struct dpip_obj *obj, dpip_cmd_t cmd) if (strcmp(param->kind, "pfifo") != 0 && strcmp(param->kind, "bfifo") != 0 && strcmp(param->kind, "tbf") != 0) { - fprintf(stderr, "invalid qsch kind.\n"); + fprintf(stderr, "qsch kind '%s' doesn't support SET.\n", param->kind); return EDPVS_INVAL; } break; @@ -282,8 +288,8 @@ static void qsch_dump_stats(const char *prefix, const struct qsch_qstats *qs, "(dropped %u, overlimits %u requeues %u)\n", prefix ? : "", bs->bytes, bs->packets, qs->drops, qs->overlimits, qs->requeues); - printf("%sbacklog %uB %up requeues %u\n", - prefix ? : "", qs->backlog, qs->qlen, qs->requeues); + printf("%sBacklog %u bytes %u pkts\n", + prefix ? 
: "", qs->backlog, qs->qlen); } static void qsch_dump_param(const char *ifname, const union tc_param *param, @@ -293,15 +299,19 @@ static void qsch_dump_param(const char *ifname, const union tc_param *param, const struct tc_qsch_param *qsch = ¶m->qsch; int i; - printf("qsch %s %s dev %s %s", qsch->kind, + if (verbose) + printf("[%02d] ", qsch->cid); + + printf("qsch %s %s dev %s parent %s flags 0x%x cls %d", qsch->kind, tc_handle_itoa(qsch->handle, handle, sizeof(handle)), ifname, - tc_handle_itoa(qsch->where, where, sizeof(where))); + tc_handle_itoa(qsch->where, where, sizeof(where)), + qsch->flags, qsch->cls_cnt); if (strcmp(qsch->kind, "bfifo") == 0 || strcmp(qsch->kind, "pfifo") == 0) { printf(" limit %u", qsch->qopt.fifo.limit); } else if (strcmp(qsch->kind, "pfifo_fast") == 0) { - printf(" bands %u priomap ", qsch->qopt.prio.bands); + printf(" bands %u priomap", qsch->qopt.prio.bands); for (i = 0; i <= TC_PRIO_MAX; i++) printf(" %u", qsch->qopt.prio.priomap[i]); } else if (strcmp(qsch->kind, "tbf") == 0) { @@ -317,20 +327,8 @@ static void qsch_dump_param(const char *ifname, const union tc_param *param, } printf("\n"); - if (stats) { + if (stats) qsch_dump_stats(" ", &qsch->qstats, &qsch->bstats); - - if (verbose) { - /* dump per-cpu statistics */ - for (i = 0; i < DPVS_MAX_LCORE; i++) { - char cpu[10]; - snprintf(cpu, sizeof(cpu), " [%02d]", i); - - qsch_dump_stats(cpu, &qsch->qstats_cpus[i], - &qsch->bstats_cpus[i]); - } - } - } } static int qsch_do_cmd(struct dpip_obj *obj, dpip_cmd_t cmd, @@ -341,6 +339,12 @@ static int qsch_do_cmd(struct dpip_obj *obj, dpip_cmd_t cmd, int err, i; size_t size; + if (conf->stats) + tc_conf->op_flags |= TC_F_OPS_STATS; + + if (conf->verbose) + tc_conf->op_flags |= TC_F_OPS_VERBOSE; + switch (cmd) { case DPIP_CMD_ADD: return dpvs_setsockopt(SOCKOPT_TC_ADD, tc_conf, diff --git a/tools/dpip/route.c b/tools/dpip/route.c old mode 100755 new mode 100644 From b5f267a9b6ceb2557162ccf469c2e29f1ee2ed4a Mon Sep 17 00:00:00 2001 From: 
ywc689 Date: Tue, 9 Mar 2021 17:35:34 +0800 Subject: [PATCH 21/35] tc: schedule tc qsch with dpvs job Signed-off-by: ywc689 --- include/tc/sch.h | 9 +++++++ src/tc/sch_generic.c | 56 ++++++++++++++++++++++++++++++++++++++++++++ src/tc/tc.c | 4 ++-- 3 files changed, 67 insertions(+), 2 deletions(-) diff --git a/include/tc/sch.h b/include/tc/sch.h index 86942f9c7..ba0b505b9 100644 --- a/include/tc/sch.h +++ b/include/tc/sch.h @@ -78,6 +78,7 @@ struct Qsch { struct list_head cls_list; /* classifiers */ int cls_cnt; struct hlist_node hlist; /* netif_tc.qsch_hash node */ + struct list_head list_node; /* qsch_head list node */ struct netif_tc *tc; uint32_t refcnt; @@ -165,6 +166,9 @@ static inline struct rte_mbuf *__qsch_dequeue_head(struct Qsch *sch, struct tc_m struct tc_mbuf *tm; struct rte_mbuf *mbuf; + if (list_empty(&q->mbufs)) + return NULL; + tm = list_first_entry(&q->mbufs, struct tc_mbuf, list); if (unlikely(!tm)) return NULL; @@ -197,6 +201,9 @@ static inline struct rte_mbuf *qsch_peek_head(struct Qsch *sch) { struct tc_mbuf *tm; + if (list_empty(&sch->q.mbufs)) + return NULL; + tm = list_first_entry(&sch->q.mbufs, struct tc_mbuf, list); if (unlikely(!tm)) return NULL; @@ -268,6 +275,8 @@ void *qsch_shm_get_or_create(struct Qsch *sch, uint32_t len); int qsch_shm_put_or_destroy(struct Qsch *sch); int qsch_shm_init(void); int qsch_shm_term(void); +int qsch_init(void); +int qsch_term(void); #endif /* __DPVS__ */ diff --git a/src/tc/sch_generic.c b/src/tc/sch_generic.c index 81d36eb15..50e2bf39b 100644 --- a/src/tc/sch_generic.c +++ b/src/tc/sch_generic.c @@ -26,10 +26,14 @@ #include "netif.h" #include "tc/tc.h" #include "tc/sch.h" +#include "scheduler.h" /* may configurable in the future. 
*/ static int dev_tx_weight = 64; +static struct list_head qsch_head[DPVS_MAX_LCORE]; +#define this_qsch_head qsch_head[rte_lcore_id()] + static inline int sch_hash(tc_handle_t handle, int hash_size) { return handle % hash_size; @@ -190,6 +194,8 @@ void qsch_hash_add(struct Qsch *sch, bool invisible) hlist_add_head(&sch->hlist, &sch->tc->qsch_hash[hash]); } + list_add_tail(&sch->list_node, &this_qsch_head); + sch->tc->qsch_cnt++; if (invisible) @@ -208,6 +214,8 @@ void qsch_hash_del(struct Qsch *sch) hlist_del_init(&sch->hlist); } + list_del(&sch->list_node); + sch->tc->qsch_cnt--; } @@ -253,3 +261,51 @@ void qsch_do_sched(struct Qsch *sch) return; } + +static void qsch_sched_all(void *dummy) +{ + struct Qsch *sch; + lcoreid_t cid = rte_lcore_id(); + + list_for_each_entry(sch, &qsch_head[cid], list_node) { + if (sch->flags & QSCH_F_INGRESS) { + if (sch->tc->dev->flag & NETIF_PORT_FLAG_TC_INGRESS) + qsch_do_sched(sch); + } else { + if (sch->tc->dev->flag & NETIF_PORT_FLAG_TC_EGRESS) + qsch_do_sched(sch); + } + } +} + +static struct dpvs_lcore_job qsch_sched_job = { + .name = "qsch_sched", + .func = qsch_sched_all, + .data = NULL, + .type = LCORE_JOB_LOOP, +}; + +int qsch_init(void) +{ + int i, err; + + for (i = 0; i < DPVS_MAX_LCORE; i++) + INIT_LIST_HEAD(&qsch_head[i]); + + err = dpvs_lcore_job_register(&qsch_sched_job, LCORE_ROLE_FWD_WORKER); + if (err != EDPVS_OK) + return err; + + return qsch_shm_init(); +} + +int qsch_term(void) +{ + int err; + + err = dpvs_lcore_job_unregister(&qsch_sched_job, LCORE_ROLE_FWD_WORKER); + if (err != EDPVS_OK) + return err; + + return qsch_shm_term(); +} diff --git a/src/tc/tc.c b/src/tc/tc.c index 9bae2cdc6..6759ddde1 100644 --- a/src/tc/tc.c +++ b/src/tc/tc.c @@ -300,7 +300,7 @@ int tc_init(void) int tc_mbuf_pool_size = netif_pktpool_nb_mbuf; int tc_mbuf_cache_size = netif_pktpool_mbuf_cache; - if ((err = qsch_shm_init()) != EDPVS_OK) + if ((err = qsch_init()) != EDPVS_OK) return err; /* scheduler */ @@ -350,7 +350,7 @@ int 
tc_term(void) } } - qsch_shm_term(); + qsch_term(); return EDPVS_OK; } From 02cdcfd38be10374a0f23b7c935479dcfc5a5608 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Fri, 12 Mar 2021 20:27:34 +0800 Subject: [PATCH 22/35] netif: refactor netif recv procedure Signed-off-by: ywc689 --- include/ipvs/redirect.h | 2 +- include/netif.h | 6 +- src/ipvs/ip_vs_redirect.c | 4 +- src/netif.c | 238 ++++++++++++++++++-------------------- 4 files changed, 120 insertions(+), 130 deletions(-) diff --git a/include/ipvs/redirect.h b/include/ipvs/redirect.h index 3f2169b8f..01f990c4d 100644 --- a/include/ipvs/redirect.h +++ b/include/ipvs/redirect.h @@ -54,7 +54,7 @@ struct dp_vs_redirect *dp_vs_redirect_get(int af, uint16_t proto, void dp_vs_redirect_init(struct dp_vs_conn *conn); int dp_vs_redirect_table_init(void); int dp_vs_redirect_pkt(struct rte_mbuf *mbuf, lcoreid_t peer_cid); -void dp_vs_redirect_ring_proc(struct netif_queue_conf *qconf, lcoreid_t cid); +void dp_vs_redirect_ring_proc(lcoreid_t cid); int dp_vs_redirects_init(void); int dp_vs_redirects_term(void); diff --git a/include/netif.h b/include/netif.h index 3098d9bd9..54ced17f9 100644 --- a/include/netif.h +++ b/include/netif.h @@ -265,8 +265,10 @@ int netif_register_master_xmit_msg(void); int netif_lcore_conf_set(int lcores, const struct netif_lcore_conf *lconf); bool is_lcore_id_valid(lcoreid_t cid); bool netif_lcore_is_fwd_worker(lcoreid_t cid); -void lcore_process_packets(struct netif_queue_conf *qconf, struct rte_mbuf **mbufs, - lcoreid_t cid, uint16_t count, bool pkts_from_ring); +void lcore_process_packets(struct rte_mbuf **mbufs, lcoreid_t cid, + uint16_t count, bool pkts_from_ring); +int netif_rcv_mbuf(struct netif_port *dev, lcoreid_t cid, + struct rte_mbuf *mbuf, bool pkts_from_ring); /************************** protocol API *****************************/ int netif_register_pkt(struct pkt_type *pt); diff --git a/src/ipvs/ip_vs_redirect.c b/src/ipvs/ip_vs_redirect.c index f5cb12e52..361da9016 100644 --- 
a/src/ipvs/ip_vs_redirect.c +++ b/src/ipvs/ip_vs_redirect.c @@ -243,7 +243,7 @@ int dp_vs_redirect_pkt(struct rte_mbuf *mbuf, lcoreid_t peer_cid) return INET_STOLEN; } -void dp_vs_redirect_ring_proc(struct netif_queue_conf *qconf, lcoreid_t cid) +void dp_vs_redirect_ring_proc(lcoreid_t cid) { struct rte_mbuf *mbufs[NETIF_MAX_PKT_BURST]; uint16_t nb_rb; @@ -261,7 +261,7 @@ void dp_vs_redirect_ring_proc(struct netif_queue_conf *qconf, lcoreid_t cid) (void**)mbufs, NETIF_MAX_PKT_BURST, NULL); if (nb_rb > 0) { - lcore_process_packets(qconf, mbufs, cid, nb_rb, 1); + lcore_process_packets(mbufs, cid, nb_rb, 1); } } } diff --git a/src/netif.c b/src/netif.c index 8407ed576..90af4c829 100644 --- a/src/netif.c +++ b/src/netif.c @@ -78,6 +78,9 @@ static portid_t port_id_end = 0; static uint16_t g_nports; +/*for arp process*/ +static struct rte_ring *arp_ring[DPVS_MAX_LCORE]; + #define NETIF_BOND_MODE_DEF BONDING_MODE_ROUND_ROBIN struct port_conf_stream { @@ -1068,7 +1071,7 @@ int netif_unregister_pkt(struct pkt_type *pt) return EDPVS_NOTEXIST; } -static struct pkt_type *pkt_type_get(uint16_t type, struct netif_port *port) +static struct pkt_type *pkt_type_get(__be16 type, struct netif_port *port) { struct pkt_type *pt; int hash; @@ -2234,96 +2237,141 @@ int netif_rcv(struct netif_port *dev, __be16 eth_type, struct rte_mbuf *mbuf) return pt->func(mbuf, dev); } -/*for arp process*/ -static struct rte_ring *arp_ring[DPVS_MAX_LCORE]; +static int netif_deliver_mbuf(struct netif_port *dev, lcoreid_t cid, + struct rte_mbuf *mbuf, bool pkts_from_ring) +{ + struct ether_hdr *eth_hdr; + + assert(mbuf->port <= NETIF_MAX_PORTS); + assert(dev != NULL); + + eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + /* reuse mbuf.packet_type, it was RTE_PTYPE_XXX */ + mbuf->packet_type = eth_type_parse(eth_hdr, dev); + + /* + * In NETIF_PORT_FLAG_FORWARD2KNI mode. 
+ * All packets received are deep copied and sent to KNI + * for the purpose of capturing forwarding packets.Since the + * rte_mbuf will be modified in the following procedure, + * we should use mbuf_copy instead of rte_pktmbuf_clone. + */ + if (dev->flag & NETIF_PORT_FLAG_FORWARD2KNI) { + struct rte_mbuf *mbuf_copied = mbuf_copy(mbuf, pktmbuf_pool[dev->socket]); + if (likely(mbuf_copied)) + kni_ingress(mbuf_copied, dev); + else + RTE_LOG(WARNING, NETIF, "%s: failed to copy mbuf for kni\n", __func__); + } + + if (!pkts_from_ring && (dev->flag & NETIF_PORT_FLAG_TC_INGRESS)) { + // TODO + // TC INGRESS HOOK + } + + return netif_rcv_mbuf(dev, cid, mbuf, pkts_from_ring); +} -static inline int netif_deliver_mbuf(struct rte_mbuf *mbuf, - uint16_t eth_type, - struct netif_port *dev, - struct netif_queue_conf *qconf, - bool forward2kni, - lcoreid_t cid, - bool pkts_from_ring) +int netif_rcv_mbuf(struct netif_port *dev, lcoreid_t cid, struct rte_mbuf *mbuf, bool pkts_from_ring) { + struct ether_hdr *eth_hdr; struct pkt_type *pt; int err; uint16_t data_off; + bool forward2kni; - assert(mbuf->port <= NETIF_MAX_PORTS); - assert(dev != NULL); + eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + /* + * do not drop pkt to other hosts (ETH_PKT_OTHERHOST) + * since virtual devices may have different MAC with + * underlying device. + */ - pt = pkt_type_get(eth_type, dev); + /* + * handle VLAN + * if HW offload vlan strip, it's still need vlan module + * to act as VLAN filter. + */ + if (eth_hdr->ether_type == htons(ETH_P_8021Q) || + mbuf->ol_flags & PKT_RX_VLAN_STRIPPED) { + if (vlan_rcv(mbuf, netif_port_get(mbuf->port)) != EDPVS_OK) + goto drop; + dev = netif_port_get(mbuf->port); + if (unlikely(!dev)) + goto drop; + eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + } + forward2kni = (dev->flag & NETIF_PORT_FLAG_FORWARD2KNI) ? 
true : false; + pt = pkt_type_get(eth_hdr->ether_type, dev); if (NULL == pt) { - if (!forward2kni) + if (!forward2kni) { kni_ingress(mbuf, dev); - else - rte_pktmbuf_free(mbuf); - return EDPVS_OK; + goto done; + } + goto drop; } - /*clone arp pkt to every queue*/ - if (pt->type == rte_cpu_to_be_16(ETHER_TYPE_ARP) && !pkts_from_ring) { - struct rte_mempool *mbuf_pool; - struct rte_mbuf *mbuf_clone; + /* clone arp pkt to every queue */ + if (unlikely(pt->type == rte_cpu_to_be_16(ETHER_TYPE_ARP) && !pkts_from_ring)) { uint8_t i; struct arp_hdr *arp; - unsigned socket_id; - - socket_id = rte_socket_id(); - mbuf_pool = pktmbuf_pool[socket_id]; + struct rte_mbuf *mbuf_clone; - rte_pktmbuf_adj(mbuf, sizeof(struct ether_hdr)); - arp = rte_pktmbuf_mtod(mbuf, struct arp_hdr *); - rte_pktmbuf_prepend(mbuf,(uint16_t)sizeof(struct ether_hdr)); + arp = rte_pktmbuf_mtod_offset(mbuf, struct arp_hdr *, sizeof(struct ether_hdr)); if (rte_be_to_cpu_16(arp->arp_op) == ARP_OP_REPLY) { for (i = 0; i < DPVS_MAX_LCORE; i++) { if ((i == cid) || (!is_lcore_id_fwd(i)) || (i == rte_get_master_lcore())) continue; - /*rte_pktmbuf_clone will not clone pkt.data, just copy pointer!*/ - mbuf_clone = rte_pktmbuf_clone(mbuf, mbuf_pool); - if (mbuf_clone) { - int ret = rte_ring_enqueue(arp_ring[i], mbuf_clone); - if (unlikely(-EDQUOT == ret)) { - RTE_LOG(WARNING, NETIF, "%s: arp ring of lcore %d quota exceeded\n", - __func__, i); - } - else if (ret < 0) { - RTE_LOG(WARNING, NETIF, "%s: arp ring of lcore %d enqueue failed\n", - __func__, i); - rte_pktmbuf_free(mbuf_clone); - } + /* rte_pktmbuf_clone will not clone pkt.data, just copy pointer! 
*/ + mbuf_clone = rte_pktmbuf_clone(mbuf, pktmbuf_pool[rte_socket_id()]); + if (unlikely(!mbuf_clone)) { + RTE_LOG(WARNING, NETIF, "%s arp reply mbuf clone failed on lcore %d\n", + __func__, i); + continue; + } + err = rte_ring_enqueue(arp_ring[i], mbuf_clone); + if (unlikely(-EDQUOT == err)) { + RTE_LOG(WARNING, NETIF, "%s: arp ring of lcore %d quota exceeded\n", + __func__, i); + } else if (err < 0) { + RTE_LOG(WARNING, NETIF, "%s: arp ring of lcore %d enqueue failed\n", + __func__, i); + rte_pktmbuf_free(mbuf_clone); } } } } mbuf->l2_len = sizeof(struct ether_hdr); + /* Remove ether_hdr at the beginning of an mbuf */ data_off = mbuf->data_off; - if (unlikely(NULL == rte_pktmbuf_adj(mbuf, sizeof(struct ether_hdr)))) { - rte_pktmbuf_free(mbuf); - return EDPVS_INVPKT; - } + if (unlikely(NULL == rte_pktmbuf_adj(mbuf, sizeof(struct ether_hdr)))) + goto drop; err = pt->func(mbuf, dev); if (err == EDPVS_KNICONTINUE) { - if (pkts_from_ring || forward2kni) { - rte_pktmbuf_free(mbuf); - return EDPVS_OK; - } - - if (likely(NULL != rte_pktmbuf_prepend(mbuf, - (mbuf->data_off - data_off)))) { - kni_ingress(mbuf, dev); - } else { - rte_pktmbuf_free(mbuf); - } + if (pkts_from_ring || forward2kni) + goto drop; + if (unlikely(NULL == rte_pktmbuf_prepend(mbuf, (mbuf->data_off - data_off)))) + goto drop; + kni_ingress(mbuf, dev); } +done: + if (!pkts_from_ring) { + lcore_stats[cid].ibytes += mbuf->pkt_len; + lcore_stats[cid].ipackets++; + } return EDPVS_OK; + +drop: + rte_pktmbuf_free(mbuf); + lcore_stats[cid].dropped++; + return EDPVS_DROP; } static int netif_arp_ring_init(void) @@ -2344,13 +2392,9 @@ static int netif_arp_ring_init(void) return EDPVS_OK; } -void lcore_process_packets(struct netif_queue_conf *qconf, struct rte_mbuf **mbufs, - lcoreid_t cid, uint16_t count, bool pkts_from_ring) +void lcore_process_packets(struct rte_mbuf **mbufs, lcoreid_t cid, uint16_t count, bool pkts_from_ring) { int i, t; - int pkt_len = 0; - struct ether_hdr *eth_hdr; - struct rte_mbuf 
*mbuf_copied = NULL; /* prefetch packets */ for (t = 0; t < count && t < NETIF_PKT_PREFETCH_OFFSET; t++) @@ -2360,7 +2404,6 @@ void lcore_process_packets(struct netif_queue_conf *qconf, struct rte_mbuf **mbu for (i = 0; i < count; i++) { struct rte_mbuf *mbuf = mbufs[i]; struct netif_port *dev = netif_port_get(mbuf->port); - pkt_len = mbuf->pkt_len; if (unlikely(!dev)) { rte_pktmbuf_free(mbuf); @@ -2377,67 +2420,12 @@ void lcore_process_packets(struct netif_queue_conf *qconf, struct rte_mbuf **mbu t++; } - eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - /* reuse mbuf.packet_type, it was RTE_PTYPE_XXX */ - mbuf->packet_type = eth_type_parse(eth_hdr, dev); - - /* - * In NETIF_PORT_FLAG_FORWARD2KNI mode. - * All packets received are deep copied and sent to KNI - * for the purpose of capturing forwarding packets.Since the - * rte_mbuf will be modified in the following procedure, - * we should use mbuf_copy instead of rte_pktmbuf_clone. - */ - if (dev->flag & NETIF_PORT_FLAG_FORWARD2KNI) { - if (likely(NULL != (mbuf_copied = mbuf_copy(mbuf, - pktmbuf_pool[dev->socket])))) - kni_ingress(mbuf_copied, dev); - else - RTE_LOG(WARNING, NETIF, "%s: Failed to copy mbuf\n", - __func__); - } - - /* - * do not drop pkt to other hosts (ETH_PKT_OTHERHOST) - * since virtual devices may have different MAC with - * underlying device. - */ - - /* - * handle VLAN - * if HW offload vlan strip, it's still need vlan module - * to act as VLAN filter. 
- */ - if (eth_hdr->ether_type == htons(ETH_P_8021Q) || - mbuf->ol_flags & PKT_RX_VLAN_STRIPPED) { - - if (vlan_rcv(mbuf, netif_port_get(mbuf->port)) != EDPVS_OK) { - rte_pktmbuf_free(mbuf); - lcore_stats[cid].dropped++; - continue; - } - - dev = netif_port_get(mbuf->port); - if (unlikely(!dev)) { - rte_pktmbuf_free(mbuf); - lcore_stats[cid].dropped++; - continue; - } - - eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - } /* handler should free mbuf */ - netif_deliver_mbuf(mbuf, eth_hdr->ether_type, dev, qconf, - (dev->flag & NETIF_PORT_FLAG_FORWARD2KNI) ? true:false, - cid, pkts_from_ring); - - lcore_stats[cid].ibytes += pkt_len; - lcore_stats[cid].ipackets++; + netif_deliver_mbuf(dev, cid, mbuf, pkts_from_ring); } } - -static void lcore_process_arp_ring(struct netif_queue_conf *qconf, lcoreid_t cid) +static void lcore_process_arp_ring(lcoreid_t cid) { struct rte_mbuf *mbufs[NETIF_MAX_PKT_BURST]; uint16_t nb_rb; @@ -2445,13 +2433,13 @@ static void lcore_process_arp_ring(struct netif_queue_conf *qconf, lcoreid_t cid nb_rb = rte_ring_dequeue_burst(arp_ring[cid], (void**)mbufs, NETIF_MAX_PKT_BURST, NULL); if (nb_rb > 0) { - lcore_process_packets(qconf, mbufs, cid, nb_rb, 1); + lcore_process_packets(mbufs, cid, nb_rb, 1); } } -static void lcore_process_redirect_ring(struct netif_queue_conf *qconf, lcoreid_t cid) +static void lcore_process_redirect_ring(lcoreid_t cid) { - dp_vs_redirect_ring_proc(qconf, cid); + dp_vs_redirect_ring_proc(cid); } static void lcore_job_recv_fwd(void *arg) @@ -2471,13 +2459,13 @@ static void lcore_job_recv_fwd(void *arg) for (j = 0; j < lcore_conf[lcore2index[cid]].pqs[i].nrxq; j++) { qconf = &lcore_conf[lcore2index[cid]].pqs[i].rxqs[j]; - lcore_process_arp_ring(qconf, cid); - lcore_process_redirect_ring(qconf, cid); + lcore_process_arp_ring(cid); + lcore_process_redirect_ring(cid); qconf->len = netif_rx_burst(pid, qconf); lcore_stats_burst(&lcore_stats[cid], qconf->len); - lcore_process_packets(qconf, qconf->mbufs, cid, qconf->len, 
0); + lcore_process_packets(qconf->mbufs, cid, qconf->len, 0); } } } From c5a4dc4c657bd8b006f8f0cc6c73e4236879fa32 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Sat, 13 Mar 2021 17:20:05 +0800 Subject: [PATCH 23/35] tc: add supports for tc-ingress Signed-off-by: ywc689 --- include/tc/tc.h | 9 +++++++-- src/netif.c | 10 ++++++---- src/tc/sch_generic.c | 6 +++++- src/tc/tc.c | 29 ++++++++++++++++++++++------- tools/dpip/cls.c | 10 +++++----- 5 files changed, 45 insertions(+), 19 deletions(-) diff --git a/include/tc/tc.h b/include/tc/tc.h index 600fa8ccb..da8d0dcc8 100644 --- a/include/tc/tc.h +++ b/include/tc/tc.h @@ -42,6 +42,11 @@ typedef uint32_t tc_handle_t; #define TC_ALIGNTO RTE_CACHE_LINE_SIZE #define TC_ALIGN(len) (((len) + TC_ALIGNTO-1) & ~(TC_ALIGNTO-1)) +typedef enum tc_hook_type { + TC_HOOK_EGRESS = 1, + TC_HOOK_INGRESS = 2, +} tc_hook_type_t; + /* need a wrapper to save mbuf list, * since there's no way to link mbuf by it's own elem. * (note mbuf.next if used for pkt segments. */ @@ -94,8 +99,8 @@ int tc_register_cls(struct tc_cls_ops *ops); int tc_unregister_cls(struct tc_cls_ops *ops); struct tc_cls_ops *tc_cls_ops_lookup(const char *name); -struct rte_mbuf *tc_handle_egress(struct netif_tc *tc, - struct rte_mbuf *mbuf, int *ret); +struct rte_mbuf *tc_hook(struct netif_tc *tc, struct rte_mbuf *mbuf, + tc_hook_type_t type, int *ret); static inline int64_t tc_get_ns(void) { diff --git a/src/netif.c b/src/netif.c index 90af4c829..020b1940f 100644 --- a/src/netif.c +++ b/src/netif.c @@ -2199,8 +2199,8 @@ int netif_xmit(struct rte_mbuf *mbuf, struct netif_port *dev) assert((mbuf_refcnt >= 1) && (mbuf_refcnt <= 64)); if (dev->flag & NETIF_PORT_FLAG_TC_EGRESS) { - mbuf = tc_handle_egress(netif_tc(dev), mbuf, &ret); - if (likely(!mbuf)) + mbuf = tc_hook(netif_tc(dev), mbuf, TC_HOOK_EGRESS, &ret); + if (!mbuf) return ret; } @@ -2240,6 +2240,7 @@ int netif_rcv(struct netif_port *dev, __be16 eth_type, struct rte_mbuf *mbuf) static int netif_deliver_mbuf(struct 
netif_port *dev, lcoreid_t cid, struct rte_mbuf *mbuf, bool pkts_from_ring) { + int ret = EDPVS_OK; struct ether_hdr *eth_hdr; assert(mbuf->port <= NETIF_MAX_PORTS); @@ -2265,8 +2266,9 @@ static int netif_deliver_mbuf(struct netif_port *dev, lcoreid_t cid, } if (!pkts_from_ring && (dev->flag & NETIF_PORT_FLAG_TC_INGRESS)) { - // TODO - // TC INGRESS HOOK + mbuf = tc_hook(netif_tc(dev), mbuf, TC_HOOK_INGRESS, &ret); + if (!mbuf) + return ret; } return netif_rcv_mbuf(dev, cid, mbuf, pkts_from_ring); diff --git a/src/tc/sch_generic.c b/src/tc/sch_generic.c index 50e2bf39b..61ac2f487 100644 --- a/src/tc/sch_generic.c +++ b/src/tc/sch_generic.c @@ -50,7 +50,11 @@ static inline int sch_dequeue_xmit(struct Qsch *sch, int *npkt) if (unlikely(!mbuf)) return 0; - netif_hard_xmit(mbuf, netif_port_get(mbuf->port)); + if (sch->flags & QSCH_F_INGRESS) + netif_rcv_mbuf(sch->tc->dev, rte_lcore_id(), mbuf, false); + else + netif_hard_xmit(mbuf, sch->tc->dev); + return sch->q.qlen; } diff --git a/src/tc/tc.c b/src/tc/tc.c index 6759ddde1..7c39adc43 100644 --- a/src/tc/tc.c +++ b/src/tc/tc.c @@ -108,20 +108,29 @@ int tc_unregister_cls(struct tc_cls_ops *ops) return EDPVS_OK; } -struct rte_mbuf *tc_handle_egress(struct netif_tc *tc, - struct rte_mbuf *mbuf, int *ret) +struct rte_mbuf *tc_hook(struct netif_tc *tc, struct rte_mbuf *mbuf, + tc_hook_type_t type, int *ret) { int err = EDPVS_OK; - struct Qsch *sch, *child_sch = NULL; + struct Qsch *sch, *child_sch; struct tc_cls *cls; struct tc_cls_result cls_res; const int max_reclassify_loop = 8; int limit = 0; + __be16 pkt_type; assert(tc && mbuf && ret); - - /* start from egress root qsch */ - sch = tc->qsch; + sch = child_sch = NULL; + + /* start from root qsch */ + if (type == TC_HOOK_EGRESS) { + sch = tc->qsch; + pkt_type = rte_cpu_to_be_16(mbuf->packet_type); + } else if (type == TC_HOOK_INGRESS) { + sch = tc->qsch_ingress; + /* mbuf->packet_type was not set by DPVS for ingress */ + pkt_type = rte_pktmbuf_mtod(mbuf, struct ether_hdr 
*)->ether_type; + } if (unlikely(!sch)) { *ret = EDPVS_OK; return mbuf; @@ -136,7 +145,7 @@ struct rte_mbuf *tc_handle_egress(struct netif_tc *tc, */ again: list_for_each_entry(cls, &sch->cls_list, list) { - if (unlikely(mbuf->packet_type != cls->pkt_type && + if (unlikely(cls->pkt_type != pkt_type && cls->pkt_type != htons(ETH_P_ALL))) continue; @@ -186,6 +195,12 @@ struct rte_mbuf *tc_handle_egress(struct netif_tc *tc, if (unlikely(!sch->ops->enqueue)) goto out; /* no need to set @ret */ + if (unlikely((sch->flags & QSCH_F_INGRESS) && type != TC_HOOK_INGRESS) || + (!(sch->flags & QSCH_F_INGRESS) && type != TC_HOOK_EGRESS)) { + RTE_LOG(WARNING, TC, "%s: classified to qsch of incorrect type\n", __func__); + goto out; + } + /* mbuf is always consumed (queued or dropped) */ err = sch->ops->enqueue(sch, mbuf); mbuf = NULL; diff --git a/tools/dpip/cls.c b/tools/dpip/cls.c index 7fbce1b8f..718d3d8cc 100644 --- a/tools/dpip/cls.c +++ b/tools/dpip/cls.c @@ -79,7 +79,7 @@ static void cls_dump_param(const char *ifname, const union tc_param *param, printf("cls %s %s dev %s qsch %s pkttype 0x%04x prio %d ", cls->kind, tc_handle_itoa(cls->handle, handle, sizeof(handle)), ifname, tc_handle_itoa(cls->sch_id, sch_id, sizeof(sch_id)), - cls->pkt_type, cls->priority); + ntohs(cls->pkt_type), cls->priority); if (strcmp(cls->kind, "match") == 0) { char result[32], patt[256], target[16]; @@ -106,7 +106,7 @@ static int cls_parse(struct dpip_obj *obj, struct dpip_conf *cf) memset(param, 0, sizeof(*param)); /* default values */ - param->pkt_type = ETH_P_IP; + param->pkt_type = htons(ETH_P_IP); param->handle = TC_H_UNSPEC; param->sch_id = TC_H_ROOT; param->priority = 0; @@ -124,11 +124,11 @@ static int cls_parse(struct dpip_obj *obj, struct dpip_conf *cf) } else if (strcmp(CURRARG(cf), "pkttype") == 0) { NEXTARG_CHECK(cf, CURRARG(cf)); if (strcasecmp(CURRARG(cf), "ipv4") == 0) - param->pkt_type = ETH_P_IP; + param->pkt_type = htons(ETH_P_IP); else if (strcasecmp(CURRARG(cf), "ipv6") == 0) 
- param->pkt_type = ETH_P_IPV6; + param->pkt_type = htons(ETH_P_IPV6); else if (strcasecmp(CURRARG(cf), "vlan") == 0) - param->pkt_type = ETH_P_8021Q; + param->pkt_type = htons(ETH_P_8021Q); else { fprintf(stderr, "pkttype not support\n"); return EDPVS_INVAL; From 1777a38dcb346ca7c05403fd723b9b4ffdb809c0 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Mon, 15 Mar 2021 10:12:03 +0800 Subject: [PATCH 24/35] conn: fix msg register problem for cmd 'ipvsadm -lnc' Signed-off-by: ywc689 --- src/netif.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/netif.c b/src/netif.c index 020b1940f..24514855e 100644 --- a/src/netif.c +++ b/src/netif.c @@ -4228,6 +4228,11 @@ int netif_init(void) netif_pkt_type_tab_init(); netif_port_init(); netif_lcore_init(); + + g_master_lcore_id = rte_get_master_lcore(); + netif_get_slave_lcores(&g_slave_lcore_num, &g_slave_lcore_mask); + netif_get_isol_rx_lcores(&g_isol_rx_lcore_num, &g_isol_rx_lcore_mask); + return EDPVS_OK; } @@ -5166,10 +5171,6 @@ int netif_ctrl_init(void) { int err; - g_master_lcore_id = rte_get_master_lcore(); - netif_get_slave_lcores(&g_slave_lcore_num, &g_slave_lcore_mask); - netif_get_isol_rx_lcores(&g_isol_rx_lcore_num, &g_isol_rx_lcore_mask); - if ((err = sockopt_register(&netif_sockopt)) != EDPVS_OK) return err; From 15b71f09370ec70fa9f3894883ef7cc2d0f0ca68 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Thu, 18 Mar 2021 13:06:43 +0800 Subject: [PATCH 25/35] tc: fix cls match bug for icmp/icmp6/ipv6. 
Signed-off-by: ywc689 --- include/conf/match.h | 4 ++++ src/tc/cls_match.c | 8 ++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/include/conf/match.h b/include/conf/match.h index b492f1f1f..b364c99e4 100644 --- a/include/conf/match.h +++ b/include/conf/match.h @@ -58,6 +58,10 @@ static inline int parse_match(const char *pattern, uint8_t *proto, *proto = IPPROTO_TCP; } else if (strcmp(tok, "udp") == 0) { *proto = IPPROTO_UDP; + } else if (strcmp(tok, "icmp") == 0) { + *proto = IPPROTO_ICMP; + } else if (strcmp(tok, "icmp6") == 0) { + *proto = IPPROTO_ICMPV6; } else if (strncmp(tok, "from=", strlen("from=")) == 0) { tok += strlen("from="); diff --git a/src/tc/cls_match.c b/src/tc/cls_match.c index cf2e221bb..f9e67d0b6 100644 --- a/src/tc/cls_match.c +++ b/src/tc/cls_match.c @@ -124,12 +124,11 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, goto done; } - if (!ipv6_addr_any(&m->srange.max_addr.in6)) { - if (ipv6_addr_cmp_u128(&ip6h->ip6_dst, &m->srange.min_addr.in6) < 0 || - ipv6_addr_cmp_u128(&ip6h->ip6_dst, &m->srange.max_addr.in6) > 0) + if (!ipv6_addr_any(&m->drange.max_addr.in6)) { + if (ipv6_addr_cmp_u128(&ip6h->ip6_dst, &m->drange.min_addr.in6) < 0 || + ipv6_addr_cmp_u128(&ip6h->ip6_dst, &m->drange.max_addr.in6) > 0) goto done; } - break; l4_proto = ip6h->ip6_nxt; offset = ip6_skip_exthdr(mbuf, offset + sizeof(struct ip6_hdr), &l4_proto); @@ -137,6 +136,7 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, err = TC_ACT_SHOT; goto done; } + break; case ETH_P_8021Q: veh = (struct vlan_ethhdr *)eh; From 7872a16b3e4d72492917dffb24135c7052a5bd33 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Thu, 18 Mar 2021 13:12:12 +0800 Subject: [PATCH 26/35] tc: ingress flag of qsch and cls should match Signed-off-by: ywc689 --- src/tc/tc.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/tc/tc.c b/src/tc/tc.c index 7c39adc43..3d295bee5 100644 --- a/src/tc/tc.c +++ b/src/tc/tc.c @@ 
-117,19 +117,21 @@ struct rte_mbuf *tc_hook(struct netif_tc *tc, struct rte_mbuf *mbuf, struct tc_cls_result cls_res; const int max_reclassify_loop = 8; int limit = 0; + uint32_t flags; __be16 pkt_type; assert(tc && mbuf && ret); sch = child_sch = NULL; + flags = (type == TC_HOOK_INGRESS) ? QSCH_F_INGRESS : 0; /* start from root qsch */ - if (type == TC_HOOK_EGRESS) { - sch = tc->qsch; - pkt_type = rte_cpu_to_be_16(mbuf->packet_type); - } else if (type == TC_HOOK_INGRESS) { + if (flags & QSCH_F_INGRESS) { sch = tc->qsch_ingress; /* mbuf->packet_type was not set by DPVS for ingress */ pkt_type = rte_pktmbuf_mtod(mbuf, struct ether_hdr *)->ether_type; + } else { + sch = tc->qsch; + pkt_type = rte_cpu_to_be_16(mbuf->packet_type); } if (unlikely(!sch)) { *ret = EDPVS_OK; @@ -149,6 +151,9 @@ struct rte_mbuf *tc_hook(struct netif_tc *tc, struct rte_mbuf *mbuf, cls->pkt_type != htons(ETH_P_ALL))) continue; + if ((cls->sch->flags & QSCH_F_INGRESS) ^ flags) + continue; + err = cls->ops->classify(cls, mbuf, &cls_res); switch (err) { case TC_ACT_OK: From f1645e5072077176dd5ff5e344c4afd9a6026558 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Thu, 18 Mar 2021 17:53:51 +0800 Subject: [PATCH 27/35] tc: add document and examples for tc Signed-off-by: ywc689 --- doc/pics/tc/fifo-qsch.png | Bin 0 -> 14939 bytes doc/pics/tc/pfifo_fast-qsch.png | Bin 0 -> 39215 bytes doc/pics/tc/tbf-qsch.png | Bin 0 -> 49533 bytes doc/tc.md | 862 ++++++++++++++++++++++++++++++++ doc/tutorial.md | 7 + scripts/setup.tc.sample.sh | 40 -- 6 files changed, 869 insertions(+), 40 deletions(-) create mode 100644 doc/pics/tc/fifo-qsch.png create mode 100644 doc/pics/tc/pfifo_fast-qsch.png create mode 100644 doc/pics/tc/tbf-qsch.png create mode 100644 doc/tc.md delete mode 100644 scripts/setup.tc.sample.sh diff --git a/doc/pics/tc/fifo-qsch.png b/doc/pics/tc/fifo-qsch.png new file mode 100644 index 0000000000000000000000000000000000000000..033100793fa8feccab92e75a4f7fb9cc461fae6b GIT binary patch literal 14939 
zcmd73by!tz6fP)&gw#PoI;1;=L&HHpkVZo|j7O8@t@p|w#2b}D=1Y7T1gduZv1i7YkUb|n{DsyaU}0@?Dc^k zy^f}nd$!u*NIrNR=%9!r&y&^CJDB9rc%m#4*)sqc(EhFZRz;n}G%a|IJ&dhSWYBP* zl_`DZHgH9;IWl}BKP`=#lT$fo#M9HWRi_|7e^h)4`qzwS^VExqiptN=Pu}|7yJek&uw4rl!i-uCK1zHh0{-6%`ee_-#=nYrIbLBO`IY zv~Rjhf8cjz2&DG#_I4(JPENkFyIb@YQIU6IUmfV8x<_xYawZwtkD??`Fd&3Nb;8CkJKO1n2la-5x< zECy3<&t~k^7noexuDqF*X_huk398524_1*$taSXlYG5!${y!`T6;$+Y<}3dD}0g ze|tA)3!p5r<|0Sa4J7h*ec)g4Tu)|H`Z?L)d2E^LWAS^6*mUT>W9#G5(NP42Favs9 znQ2$CUhUCZJv`5c<^A+Fd( zOIMfWa(jqdJ;ij1qM)>Xnhm4eGrmttuSfiFayY=r^NsbXsSCyW^=3Lm(udiY$W(X7 zsrP(C1cZcyL8#bAr>D2eUpam6^Wq-Y+L^c#R zvPmG#Ei6n{=Yr3m0FfTDtFC|V2qh0I1MYxB%J*8(#>=a2&d$9aEe#(bHCHnGtfi8> zbG%5`ed8ZZ=VZ91V)w6#3O-@s`eOF2_;s(Bs1Xu~8iyqqzG5peh{E3P?p$38`)&gH zF#>7F!{rW#W_u+qS7pacmQS(&Rya914eMN(3w*Z5lCtVN!rKJ&8AladCKc^au+uDye9 z0;ge0Y%I#UZD=j&5MeAEFE2jJ2ylZ|wO)Os>Zqh7%t&}0t_6MK5$o$m;%|3wB<3`} zZY_6wCHiWSjbe+s$2Z}z=S z({gS+YB`K!)p{DJqN$m!S3a^CYwkH-s6!o+>UXDO>vzE>H|)OPbG}{D44b+-*_1-J z9?RFTwq_>A4lOh3c-p-hqb#QCq{$&0{R?b92zlPlri*1v*cWQ_dbeGZ$^a9@(qkFl z``dGx0bpSoG(^)YX=%k&vALq3lp}-%KC8LgF;{D2jR~#=^-eZ%3u_2OTtY&<#`2FJ zKMWeZ@L(JU4U_Fp-)dplA@aOkw*t$ytDUK<3k%u)jftMmj+8QQ70uca7Bgr2qln)e zAk+LZZ1xo}Mni`zXjWU%s6J5Bi|9PX#B6M6$W~0sCe~A3 zJ027_EGeE1!?4_$spj0sFDwkpbE$Mf^w!noCx!f3`VtwL6MMEZr5;jRU@t_@#>#3l zl8sGcw%Yyey?#%5gV*VMSa*}p^$(LWvTeB_>lQMWmA8Z9_aBv%a6V?o8Z>?M>Njs;H=FZ*O1eid2_h|C@Y0>pTc@Uru)R=Uh}NQVy0b$FZ?7@3S4xo5L=e z?8od;CnqPa+v9T}!;p}W#KpzMFe>BV;6#Ln``n&RnCmboVtx&(CFA-uXD9Bm@lRb@ z^z^8Y6Cd_U^;fK>rsm#s<#qrX<;B%@$?@@VFg6(x1x2IrA5^AYuyBnD0`e}OgI2Va z&hX4w3rowvA1}A3EK}!7jlm8uuUPL?bMNgp-`~x9?Wd`wxV zmt*1J0K0q2TGDW|RZy}w-$>?LZ`u{{3G?~iB!PWk8&U5wHX8U!&E@gB_}Qc>gy_dh zoAC#`?9Fa!Qqt=I?+XPk0&;r!bU+4+x}N2$VU&7#xJjae~?U zz~eiw|9x^xF6wo%+7lC2_KjAS#nfag+CL7g=Q9=w41KEJol`_g$Cldt;$R8n!prs4 zVBM*jIkJ`kL&YC_J?yPuA+)r#>2I`DR1SdYb=ZiyFSo`D!pg$J!g5l)dS_;mAj2TF zX%uLF`t(Wip*jqTUH4B+Xl=s%mBpRcuuLEP2IQ$law0bEQn0xo`#$^W`o}=qQw{u7 z>ApYjP`sYvjYG*R?6yP9|AClXug+z25yTBHC6+A~4Be%0+w~;0`LVjlK?^eWca!tI 
zIgt4SMOuG{WYn1V#jmcKL812Z4U8!sZCzc#qh9HRteO{>m(0o5Tpd%tElEs4P?8Ed z(PdQtV}nF@zw>)Af?Tkzc%jVk_vd9Zi-8{iQD~MJ=Cw@J7B&yhq4~S ze#mvkZxKQ>DE?dNymduHsLML%9RF!e{%zoDM$ z3@0Oe{Jj{`0J?m>eVX^4UNVCBx$XK!u$yW;1sCpAUm2_ zSaf!E9gEy|(?&`tg0)x=j0TsyMFf^2*8+$?pR8DJI3s%O){Bbm(o>3%;0Y z#$j_{Pkyz!QU5&I7|Mi4fx5cBzFzPtXPmIh5ftR_g3b)u57q;)h7sOybJVAToRq;D zu>!&&qlp$|8AkXlxP(I)q>LI4A}4n2eB&+M30SgJ??bI0<7Z1>jbTuH0S8f;Gjyjx zyP{=xaed85PtS~OsabFe-~@1@gQ8X=bZl(J*RTC&FwnwfG9x3EgK;R*wiFV1q)R3S z6M2InCe<^x{JQe;@?KtEE-rjl#H7S z+3zU>ugxrwL3D~V?d|sgZhpW*{R0C4RecE!jV=-2K~2;fBseGlQ0DUbIv5WB33Ut{ zt9o~Pb5rNOZ^xg+%EBTONl8IMB8MKJ3Gj0GpQQ&w>#Mi02cwogiwt!9&Jbb3-3f5# z+F)wQ$mU+-xk1g#e@aPWK3AcHtV+p3l~zL;05Zx)uR1^%33}~WSy|(Hv9Y~h%b*9e zN7J?X80Yg-zAXVBA7BUexn61e2R9Ku{z%rF=cR&K6(CapplG#R$W0%Uo6B@cQR{>V z6~ZqVdrH-B@!N@N&W`m)F!+^~ol3-HS?%U9W8am^K&X4g!!AP}M(PA`&p4bL=cM z`!xddkPEwgSZoOhpQeFAp`4t!??&JIu#7Qra71v&Cnf2=eT!m%_?-Xh4?r+6?-s@+ z%FD}nI=&yc|4WkuE>n~4tf8q1`v-%d=)?;~Y>(vNVzJ`GGBPr#b==(COiWDljhXIK z1f5aEKw%HKb$dbi(O)H>(G?f~MbfA;AREz_5LBKt%%+;pHkz}su>oN4AONkvq}OM= zWPDct4DXH-1QtI2p11xMbq`3@(eW{vk5X5AJD=BKb1kZeu zO+x;G6)u(!jlf5d^!%qHAB;uvSXmoUJHix&VX9C|3zlR6FucfT;HXa|A<~BcHv_6f zeOc5TSRSAS^*Er872)O{8qk;W${gA#b2DnJ#ulr1Keol-i=#y^r>3nvnPBKE=y|jX zwvtlN$?Wn7p&~i=EM_|N=wc;O;FdtZ$F;eOK;V}pVz`BXjQ@kjPFWWz|C8BGx83R1 z(XQ;DkK5H%Rlg-A%~aXyX2Jh?(!#-k^5hBg*z0eJ@9f`uvxsX&f4gcMx1Fh?hqt_X zYECGl&SuR=DjB25XViGPaq87*fIIO$DhdL`f}4v&5Gc);Ye~K4ES7M}(T=araVdme zq01>L?M;?t@~A>M(1fhh)6>J}7dlNJ*&*C`t9kf`Tcq=GdB&(^jE$GRayFs2CJtrF zQi*sRVqp~{x5 z%kdw9mhY2i?`oaszpTe;8K@4PeQtj8ECxMOaq^bW!w>3+ zQ?-TW-U-e&Pzbw$tfwYVLr`Yc^9)!|^r>q-Q5&UDG7jlNAoWIZBsKcKTz`D#9L>*5 z#&KkM;URcq(@0QLeZ~9|oMAqrr_h(}Sg&n=BCC);K0%882|(^M-?Z33ibdX=d}H-T z(R81JgQXP6uMP{{3O}zvzxN!rVRA|k@{xvx>K!#q8l-Z80DsY1mamQs8EHVy|Q(F9yRHjB1+xzh+?|l0wx%%7Sy^}^-hwanS+^`2HxT?FnBDJ`EC6Y>5h8T*TBcbQ^eY zUthY&0e(!6i>Nxlma6i`B+}r{fJGK+ZU;WWOgmQ~l}bD+_c&bs{{1_^@i(9}qcm=w zUIEXypR047DmM=a389V#`Lw&d!FnY7A*4k`Z+EK~?lv+b0o*jx@!hSQ01@~^bqZ)V zJe^7@BH)pnoShduR`3|u!fN97D$n4o$Q08t3W?`6VJ9e 
zTl?FfM1V9LHOTmAwa4(|p|q=OrPl$$%`mWwW}!A71qJr*H2B-Snd<%7T4&Ct3lk%w zv{oaoL| zXIpD9r56)ByP{|%*5B_53DrQ!;?S>S%6`avvkK`7r?2JYE`iYTo<9j2RZ2plh0MO; zTS`h3$ZtSN@^^eeOM8D9>1SqUCgH^;ZDh2VuTe0X?ayr@7Y#N@*mdg_y7gS$DWH5+ z_6x-+P5(+?#-7cL6=(s7{D#?QBRyE`=HSnIs-KvUkk{$fSfN`=0i7Jk3=l4reqVq{ z0Tg%Hoj!e_leNx_VKFH{P0`eVfBqan%>9C%URqP}rDoCtHt=e^OSDWuV#GiLtWeN) zn$OQ(Dxw75ul8}8TxTQm^C`c$;ml7mJM0K8Chw5m6xkU{{m<+FO}mUy}bNk zUoC!<$iNFVSqrb&SXct3lf%mL^I0Sz)Em~-Gr!Br0|NtrPzj2c2PjvirKPHwGI$zf zVQO`N;`R6UPfkwO?>8m!(XgcP_3-fW(#r+9W@j*IYG{D;GPQda8v69R)|1;!aL?8q zx}era9cQPf*f=vrHy5T zLMIm%sAj#9vyH+W$rFH=0F0$d0L+NFm$A9hwphN1bkoP#yffc8WAS^=v5NwT8)xU| z!Ir<5J@h#y&UR-w^=kg*sWTE6Kmn@*sVojNxDYJ1+I9vN_g|Im49jJK@>9h;;IGYq zd}L*1fyAgMtE8j^@)-|e0O16n{nw7+VtrwO-@?gCRpQ!eC4fIwxoy#sig0zi3S=RoRLGBP;CTxo#q{;C?tup4QEEc+uN z6Pq$KGs_2l{(+%r{f5t>y7+t_`?ktJV^p!{?d^)ppsN((GFkjyG}{WQn-Qv!d;Ipt zod65FYYCx0&4-SByCaoUQXy}Ztl8+EQ}$mP%SeILVPpcuKxD9QJdGdOeEXQj53+NG zFFcx6l!V$4qiZyWaN2aK1GPZxxStomc!-Xz%dj$SomvL{o_N77UjDvUW5_l|jf!uv z!t=T{(QUFhw1pxRvez-^Z*ym`)w@PWnT$tD_Pj#2MO_=+R2D{26_7kxz7glPD{_CD zX6Oxx-N+bDq{~N(AGU*+z0An}vgL*nE1MjmYdo5rTU-pl&nifefQk2ReMOsxPi+st zM?Wi&`Z~nCBV?W^*8CxK0l4e?_gg?OItS^fO(ZW{WazH{nww@IVj7KzkWevM$aS{H z5t#Q0?q#r7(So5{a6gOR95sKc04*-xDCUajiVK#zd z8>p1Gc5^1ia<~OP6pIn5?hW-PbARWAtHVFkU-!+$82)VV-Mk9GN7W>Jduu*zDCDy9 z@0sNZwLrUVHY*IBrY9uCN7%Le;_@`~%NMH&4wti?H2+85iUWf=N_gjSIB>OR#Kg{r zCJSL<8@aj7=H`{v+K)XA5@J>+@-_0RtDj%)eulpsd#O&D7~Q~^C^PbrL!3wC$mL=a z$8TF@GXJ236)nYQXKE%Vz(LrVtP;8P&FXUkEHaUc3ELSLF)_H>vdgo^l-JL`{i>i5x8*RZQf@Gfz>@Y3XEDOXo?er^&(|#XVgph+5UYdUj=41=hXa{kh+O{%2h@ zdictvg#A8;u`xMK`J56-3lBRw7}1(Ri!*qj20IsCC0-oU4pG!9cB!*;KbYGGQWA=<&>61 z32h-sd-7o?i}AU&S-0I5pfQ$I+fxBp{LFq48sYSlu5YI4-gj}%UH2{7IE>;Bk&rAV zX6qpaBQWj~Y$G1o4!5=zH*DBgC^J2|7r~{4Ly{$B1?8FPV;xSWbh=vZKrIX#SW&bK1N?~^^NVOuv4`T(cKCi}~$XVm%Kpk%PO z_XmJ1fW;Zre_5+~F^pb4L16;lIKA=>2_f&|*7@v4X&Po4#y!r-J=E9t_15AhR7Jkd zZ8t8)G~WChcc)|Nqxd?#h=T*W)>fMLXpifk_}s0npP$twT&exz$cpU=gS;R{fngZK zx#PcoukpLT%Z??1S6B_bm<)$lCHz%@Vmz9+q)I`iLuoUQ{O-3 
zKJ-c<%^m7u!Ym)^dVNAC!a#neMSC*6jq?M2XCpHGlBnHwwZv%GQbIxk^iag7DmE7n zS16{dBSXV(O#0$4o!8j3>i{=S-<6UYBTJT#rbEA3Z2c2Sd7D>rA)z@Q(&W>1e=?Hu z=dE-jzXr2iO(jAJXv=X-WBIDcKZo||H=JM0v^ zj*9YxE0KG6)QB2|k<5qnc?PpxCR8}jRR?H%9C9q(gXf12M=fY&o!8Wij*b@V*TJi7 zawsc44L*5tscG}655dOCiHV5`c5Y*`Yy{NEev3CBKKz`)2|l>|*Ca(G{&ng@lgr^S zMVb3gIpGnp22sn!$M4X-Hz=&iw0S|=4J)ivLGw{~zXAE4znbQoOij28f?qgqh);3h zc4U|H(29@Tb?jMf_+FIP8?B=m_b~~nK3i`{^~tY@P;{E!6Z)f>WC7#fc-8$F5xevJ zvob)_u=_Rt$p7dsv1_C+rEDFyUpPZh2It&exFbZqqY3#||r zyS$cl9RYR2FSbIO?F<8Q9dJp)gTRl}l;)qZytW#7B=5ZDv4$S!vW7p_Mg4epxjx~> zP&lWcRWk9=pw=uPOpc3})VC(kiDxJ30}SWE{R`R_<92`mVNh-s5*Zm;&Vn^W{cr*H z#&oIRrfZO#eNMhrS66qK05WcG;CqPx@IOyW_XX+d*}I8JHI-=(Aj`%M=cM}9=~ml` z407sLg{e*dwj7K<)x4a6jK;|Xe31rX_Y+BnT0KzG9rPe@0HmWjHMwaAFz3m_9}63U zUe~A3pP#{?5y&*)8yRV7=9x;`X%yEied?>{`1n$9_dkb)gm4pMP1X)WAC_1Dg{O0Q zd3m&JWqFyJ|I3|xUs^F=C=b!ks3?iI&IyYPG7C5ne4OCajD}+tetylIk)CoeK0%FM zR#vt(VNCPi#f_j)wub{N0+hmtgD?E8Gnv+AI{b+W5N$>{5N$eUX2^nwrqNuptkZPI zGG%cuszJLKI#(s_UpLIrYZ7&F;Q@U{MHR{V>KzJn0X8D>oRxtA)u0rDj~|p=K!o(! z<|X#48GR0|SFcb^cZRdx9LSrTXpy4b#*Ke7*U1?H9V_lm>cxnRVX<6RMJZ-N>2GS& zMY;m!cx6D%-E*P|2RGuAkKyFzR@EU3+?Ci3x@9dw-iJ21Y(6t(d+`Exdu96ScUm!| z{dh4(dAnwp{>xx15z<+S9Yrr@GthQ$jzkt55_)z${)n=6qY-^?!PAC>=$GqaRUzZG zAE!0fxk_}W*VJ6puBQ|f7Sh4B+2YUVy*atK{?QJhn@{`&KkS39E}#S%OiD@oyD8p# z0iwe{W*$}JO*Yq!0m1&EUlhWa@*N1ipkpNe=8fog!3r_Z3+ejuz4`}?z90`3( z4l441rh=2$`ariJnr`X?|35;fuacPpxa3{8={B2!5$Z3(2YiaEX(w@B{XZsYZVTe! 
z!#XCFiz$|1x5rFWvL$=Uclf&t8m91u&~ppo175Je&$O7G~zd)6?+C$l!_o z^oe1dizpjzChTVEuS8GK(12PWPQq&ev`9ej^vlgK8eD2>`@ZNQ-)i(dvd9C?3eXb- z#OE3`4=JGzY0b@kAg_Rq6d1UmP$e+*L=YzQeL8+r`DiQO{1LwM@zD|B>Yjj(c`y?l zfZ%Kf^ci#(-vV6{Fh=JD0veQ=0QM(VNET*h3Q7Fze0-xz4qqyv$$$O*{hu3t?b{+GvJl7)&Yb#_)y5<*8v{TuXY z$h(Jez{4YDy^TkJF$rJL2L!#MiS1JuJ79{VdFo$@*a0bce#IK@@Ax$yX`|PSNA&Ju zB~vDx6tL_sc(j0cfgU~}leM15YfmtV0DGv@Zwb zTh(&-z|^(NmH}Gs)OkMr->Ohk1}tV8Je^%!PCy;zG;Gq-)BC`a2(($F1vhfiahwYe z8xnsbplVcsuCXKpw6{IL2o6vm8X80%I-_l0K4nYjR}xDI_QkVdpyxG$>&E0=Ypi{fTbt zmke~4K<;HAAp;gf*jk&oSU{2wb0rZAZw&aDu zl1TiEfwDyP@t_sxKDCv|Pb9&tM;p+Se$;>OeyO6xjuEi}y7t=<_fG?js%Jb?x@tbj z07KCMp$pJSnJ{A2^8%2aEzHf+bNL>hmApfRRD;I7K%xS8#H|ve)NCMh(@1?@Y|sU{ z2i(tScx1%#@6SZwR)F9F9R-9ui076SXvo_|y2S1B0eOClFf{fb)q`80TQoI)Z zOq`s-Ra85^L$gm_6$d;7qf$;#H7e*a^(P5%($PJP1uT}NrGGiLd-?bZ7>W%ez_S_* zjg$rwTa@b-jf0NA>HpXV- z{fiebfDE&@yQ?n{Fkk^%YB0YnJn(18DsKQmBQfPjD6o6X$yowK*;ZlMAN8f+mY>pm zOMWWls9)0~Sjh>-afmJLNop$hM(<7jCdRR}tNI)v1Hs zT;ATP!hTJ%u(lFLC3v+xXQ+v)>d-$LsaUfCeeGYqrG8oL#HQtJ_%Iwwp_9AijxI3L z;XafR_9u8jx58?Fyr{m)?smIBaeXj3D0Z<1f*p7#@9h7M-*$eMn{G9LS)GztAOcD* z{_*x@UNOV&^r6yvwEO(!>Pn=!j-~((o+3b6pR z8O5}yc>L>&C1}P*Fn62d7{9KDZA)YAn^3*yqj78gs0Q~=QP;uOG8@-(7|l`ZVcp=l!OQ29P1GJR*Jk;(1wOkRz^H4bJc{eOnmIObyB z>e2sUFTv66l2}(w`Pj!|i=fgxT)=L-WOnvBQ|SM>zKHHkmw4SiY#Av>W5*dcTg)R& zHlnX=ed8=P9@@}WXnhp`vp5ii@ykNOlEMwjDW47jR)f^DE1PWtvIW17;f)sSer|h$ z4PE3$3}KMiI8AUqruIa)+q8pKK)49ZrAyM3xfKP#!g-jdJM?k7^^49#Hb zeWC0LLpN0*kk0&Tb;SMBeKSg?#nE^GX{qTCBW{j?(&?~w9`d$;>?Q%b66_DIA@7Zw{bZc7NF2l)xEk_!h?%)1yJ4fq3!&v1 zKOf=UF>4#5A`+;ZQFzf>^Yz{J?)ie(cH@wtH3>QL5K*r;qRMWvuGz`Y*U7hDXZLPo zPu$oKoAosdgfUdm)>*T$Iu!Ck?wL~K7~4ik#Z&t9WY~;!l?`G+-mS-WrkZd!L)j+v zuUxWzR`D7K`7k>03Q0kE^i5C8K^I*wSI^4d({i3$`A&E;g^2ORh}IB;5P6C*F^Ofm%nl~`<;o({Tw(|w!~~ZH>YD{(nUtQ3jq7x7^bv>A!us{{=MF)e zpCny8G740V%g6YUV)s_g2{-_q#H~G&C7__ZJv*tcXLLzj6sBQ&q3|s0=HkWPRu!V1vBHI?}Z?GjlvoQ&KGED%DAR|P~Ml$=NP{akmGwW#U!{KlS1=vzlzO&I1G z@z4_J$7espdtwhc;t2C-!l$y>R(oc@`Q~nDo$7Y$PstV5wrTYg7z{e;O8uySu}79^ 
z&81lYiUe2sSv~%0#;_Y4v@eh;E5YS8OJk|Q~WITm{bp=P|6 zP$7=eqMkGd?6s$kr-$A}_pfg#6Nc7gAJE?8SfTArmadEzt22BJfxws%C=uKiN4#0L zv1fD77x46e6(5r07nu{XdCrBn{aiPd2m|kXLRpHjh}?PA+siwY;L?XYb)vxpAQLX#K)i!^*U2w z+?cLp{n&D<^m#rzoGco~0;>vFFpN$EAcQ!W$H+Uf|C3Xbe)CQIahUF`P%O6b zme5ZbeHAb}0~%lW%Bt=xsC6~ESOEG4P@@O>XXRdj9|NcO0S; zB0)TJltgw;2V~}kc{`yInJ3dpv19{OXB7Xsz21+GrIKd} zwRI}==J$$c97*o3L;ux@cBRn3-fD&)%%hrO zV92@Ei_*{-^fY@8MATiyAfJNyFWdPBK@pJ#AU%P1JKo>75aUaK8y%lZIQjNbYq>EqL)Kf-07ursCFD)N#3J!Z4?tje%Y; z`LCgJ@=6anqRX3!V{vL=`TSAP8( z$d;y{zn#=?WJ8*t!8{*^C&SEM9WU!G>jy>ri5`v)Hl>U=pQ5i#**RUktGG=+O#h`M zT$-INGw67Kb)v4OBL|JxaQT&OvC|%6A}N{pXUUcaPeUL~?VW~(1&^!m>bESwit+H= zc^T^U{_f3D2|F{;^YcGedwJgY0)$o|cxQ#Y3k)5Q-O`9OQ~6#j0gW9D&HSfl+#F$k zMC|V`fay;l6fiI_fWElf-t3g3#V@za%Iu$?xZ@#7MjOUV9L#P!Ihmc3%yEhR69-Sm zo-huk(!n8aMQc)5Km$VCtCeMA`;|u3iJqlEz*;me)fJ(LFKxn=d%QnH%gkS;Z*7f1 z^Y;fSV-}oSQ(fJ-lIT~?y?poRTGN{ez+JKE>r#>K`yJwF#n8c^J(Ae;0D=eB0G%1sX?;Gt6r^84xd;5+BBhd6!d!5Pc`Mh49>t9iU&6j-js{NsR zZ#_)6fjLJG2qch>z&J3FVM#p>SlF#LfuptkcYxVasVguSA3?&qcGyLQi-BQ1YSVfR zkB*5({*0<^Q}}=mV^!qyn7$Sb6s=^gU|bsVhO5aFf301Kc%JGL59S)H47-Rk!@#w#0#zsW7Y^JS&TR{xxwwhwdC7#_j})i9yC8*-wsDG-fGiy<22j-tsN2gjS2F7 z%tE)x3E>kd3Pv!yD+grgQ`W(|P6yj1mU}fTcEOpUYAFzp5V)B5{f_&BZ6fzd`u$73 zB*M|S@ql+s=z&0rPSybm%qb8ZRqha;O+c^#J=ZNDB!M@jfYb~I(6`SPeh`zv&C+So zMKioSNE#aS3{dB(@pYo<8pkF+$-;I;yTU%|`fH0t%?ET0$# zY*U5tzzVR`F5B}>G76o*DN&qYS~j*FAol@j%5F)Fn3e&~^r!v6uDH(Js^VukoBPf` zZm;(z@qctX&q@3)R+p&n)4JWh&G^or{aeA^{2;W}in3ndde(&9H<3Il-gUE5Z z{FGA~Ec?Hoir+ZgUmRw3MQF%cK_gAM?s_IdT~+Qj4h0SZfw-+8FRh6{ps2&2cUY+K9U6CIbNCC} zMPA<>zA5v+KdZ{tj}eGqgo3n$w)fCV*Z+x`%D>*sc zx+mHFmkW}>%*@P&VlLAIgPfe4hL(K(9(fAE|JScH#Pfr}9|$C|xG18$L%ep>&Uynf zgBco?Co1>8Sm z%leizET7Zkr6^*p~(|%h1Q_v5(pCuqG%@=;~-|$0sJ5iw!+A zAmMu8Qr$tvBO-EHGj5W~WBcES4U?^Och&v0zw{?|JRVH~co`a)0Lb^76}_ISI)(IwefV+`K$=baVj$fpj6S=&{YXu;17*37Y!) 
zYa1Kqon)p2lB(%~Odo!t%(aE#8aKW~4}R)6IkIxPztYdAT{h@UMM*hTs$bRYcT6Oy zQ)YmUI66A2udiPkG!V$Y_RydXd34yg-O|20w>p@EBuLX1QynPh_S*XMqAvxdwNN7m zi;gwnEx{9?Jxhc-Yr@*v+EVN}!&&lVwXMYv#m2t*vuC3{32d+NX`^|+IM0eozA-3c zMpB9Rk~jy#U{*V2viGWbJFuAs9NtCb=I1Y5e!ae;lsj4Mjyc{K-#*!0L=rTcn0D{e z`tL}<4BOTb(ir}LYuVh~T)WH+JX@=vsCZ_(;ubD=oVJTPGBK{MhOHdpi5EQo5L@WI1{58cVZ5*Sxk-hd$Jlnr?1S z?kJrfZ$EhTc<=IfHd8df*VcB8xKx&o=D(eI{OZp;k_E%q_Bb5ctj~TXK0>+A!h*f= zs)bFtLbQKafuv_>&Evptyz1#LSFgLjzc9kv$LD!Qm=eZ6LiJmZVmT=fP)5eEgs}d7 zhh=g&C`UV=dEAq&z>8yr{gbJqd21wn-(a?wp~zrAf?&Dg#ov+AOw2P`=9-Yj(MTwg@aHV#=hHa50-WgYYUc0#EF?-*lez|?&Ed`QAe z=`8P&jl-Kw#fkaR&MYI#K;RxFWomr9bOM!dzeVT0C|O1{m8R$w4N^o_ad=OgsYZ$N zdUjbJ&Rs;E+h3gJ%oQK)=7I26J;HHB*q#C>1$ykVn>iI&L=lW!T&};;1SFY=M+Q)b za@8A^tOlQGx=NY}@@~JkODItIRmsjn*;Z>e^UW^1kK&f!y@@@}%p_}889jlvvw0ty z4a{dcZC50AQ#tgnREU-Du~_agKBer%bL?j3(Cpi2*8KF_4G#m&F-n_WlUnI5k!^%B z$#&rxFJbZcM&&Wt*rPs-{FpL%cGB_EF28_2x!*|yVyn24eT=Mhiwg^a9;@P5_5P=B zFJFF%iBWTL;ndzPdL#XDX~|OZjUM}_(oz_hgoK2|#Kc11yAje9QOu&Eq7NTpZ5){N z4^t4n>y(j^S^1Uz^T!WYSJ$=4nv$w2?!1TfnU6_m2kdIhD^HF#r@X5^ene#(92z3x zkftAmE%?~9rBtWHu-0k1Nb5l_=Oqn&?8C=54^{fule4q4)6?f#%RZt^On;NDGqOFO zX=psRvI;vb@F5P=h?UniFz{XOO)f2E$H$Lp2{<<}Fvu?`_-Xm%!GnU2AL)~o^VJ`6 zx<)TTX=G(({rB%*MJ(N4WKm~#_v_cMDe=NT88@DtpASFJ&drrB(x!jMSu`}H*=*u$ zZ~rc07>DFtCsp@!OTbiDSJ&^~$#2K(YAP!$udc3e!swxZ2ntSq`F7^lh=s3kZ1OYMe;^tQI>O)*yTx#kRT>jg)Z+CWf;A>XaeOg*ti}X}Se)fpYg~&*}GM#t*7SpJ$QIvc} zMn+UrRC{}SpG=xrW91uOZrWK{J*%Ryzf8gZDzclUg<7mF{yViAoD-075}m7r(2l4fXUq)<+`NOSmra#V%a_O;qZB zGKzTf=Hz0(|MK#()_GPHi-nbSeYgOBtn9;w@rvi|f-nEQIpx#f8~SzCO&jDe_^~rr zA&!(?kJG9rKAKu6@aVU-j*d>_%gr`}^{LwOvNFdQ8WGMgA+Ife_>tpeb>-*JzVl&Z zZ_Gxu^!4k_zP z8+}qQu_WnZqt)&0>lwf8C|WTS=D6FGPvM3wVq;^Wck+62wbi@Ae8Lq#`3DRV5)x2*+n(xdjI*<_(7pQG6}k|D9pf3BlvL+3FNu$jk8ttupyII^ zu+9vC^;P-7H1HnoK)I=y5j&ye%gsr2gn1?ZOGH%xmyx5hSoO22P;<_lCtelvhu0njUw`b7c!9prgrgMIJiqZ;g zR1u4XgM)7^VwLCv!Co6Khj`5sDq^`XChZMBY&zPY3$A5i({HcvDWBpM>wY=;IB4^4 zs&?DZxmiYl({Yjqk;M5NwVB(z11qjpn>FFX2Qq~J)#=KKr_Lv%dr${@Lk-GWymuFn 
z*u;p|&Q7b5LJf|loi=F7EmvpuR1U6U=wztK_b>ybh{wha$WznLme?;#rNZ%&d2BVy z3`2jYQ2m-dJb&yy(vm*%&Kx zfB6z482%ZKH74rm-`6eYO%m?-cfVf@6OpIsbGm0W8C>Cs+S(OGxqEtfe#{WEyMO<| z?(^KV@^X$qV|Km1l}a3_7MKWrL0iDqvY}R8S8jVtKMv`HQS&$qstfZE+F#>aT$Mw$ z>;2l?EONwxe6NustFNawR}n7NaXcIJ)qB@mxxj;UN->wRxVOLWm% z`DXp4{BR_{Z%)-(J-O#7$defu7})0I`lrrSy{xf|3^13UwY4>g9HvNG_kt)Voh7W( zgGar^Cgt5**a}Mf!OC62bA4!-cg*@q4QrzlapfvZnte5fNblbL{rk6%xA%bc-u^x{ zHMKzWyXa`<8pqP1E$5xNZ`I4sSnno&be?Ugj%!Psdw)Mx{^5KKwUBHo)yqFcMMVs< zk>dJ51ZHPxkEfv3iv~NE2VGxPE806o6+SuG-zRn&Gv`%g{O|jB`QBjc`N}yMoe3h& z{U1Mm=+y#-E%RkC)T7JS2VRDVQ!z?*oe#X0;Jk+4t|?-@hl& zzBQ75Z4lJfd$2WqgQ!aj3vGUXW)h8DFZulWNsU32+T`S<&a-FD2SZ9Fy5(QKe8C8I zUF=GTj~73DJ1I?HRJkg78$BAy?~~snR!rNVTcb{F=Bvd{N*UE9c6B0Er0w+kGkR1r zBay<37x_ky%ZrNvx~Ri7*sHv?J4F8eR22Wp?(auE_AcIcIwdlf=!m7IrGb_0KlNHh zMh^D&_MGI5L=l5F9xg66(+!7E$~CpLLPJ9%T7xM}zjfN_=}iEkfcgv%ggA3twlpOr zWn6J#QE$7@_Sfr{4u=S@NVA4eA z$RtG=??8j6{euS&Y7F}Jo3vStp~{7zVR{Ho!&jGlGt}&{tQWlIJrl8O9)d;M>)YG- z2p}j{R#tcJ-0^L`YB41q4jQn!XlL70rr;@ld>bP)J>3|})bP;I!otFVuSj4-gI3}B z1LY=w5v;5@2!J`rW0WkI5V!^aY#$yQ<6vXcd9reD6yCr1(bi_(m%@8uZD+SW54(g$ z*kP2R2pSrSz*`p*7U^{1t! zogHqxHtvEJhCrMX7S4=R)YQmW5J<|GeMa)x##a7-;i_?(78DdbgNkw7bGOB$D$uS> zHm+D+QL$lrywpOw1Px!=n-1A1U5!T{i{%%PRx+|nFZ`z2S6^S>#Ds>~e(zI3K|yG! 
zl$8||g=CzZdtNKHq?6MY%xBz(569~z{4T+7hc^zPB3(nRn)P0cf`tVwwG4RT z+ECtcx{FM6fb*JIV^7hMpc4qRg=N3V#3nBjy>7nwKs3m~_NQQ-X3>UIqZQ z-oJMN{EAJ?Y*q!BcbWf_=f;@&)E!b(Hsle*@n%X`rZRo31F*xMcoyjXY&xYAHTL5x zFkcJrI2PjCj7{PMQBd61Sq9BGzyEau6ui1>Be+&#tz)l~Q}LmDra;s*PW${+c&FBH zfNUMj=p#QrKaVyp8X{JnF;*UNpMe2wdtiu_1m{AbNL%zHiC`@r78WNtUg}fF_0bYC z(v_fRUHh^*yJ2J|b!1)V=qy>G*Q;=rDAA6W|c zx-8}VlNT``MvLLDuC09wk)555ze1z!u84=5kmpoUHkqoUc7K~`z)D# zG8c;fF((H`0~>LEw0ZvbgDIkQbo4Ib>hjo>Ao8P)MqEmYfx7yRI0G>$O5+;90Ab%e z?4NJSE?Z2wg@n%Mdz~qQcSiKnUmQ=?*dsVy7dreBdX`=Vh8kq9@T-=O|NOfLRS31U z^Boy9o~Kw#vWkj|va+&fX7@iiZYiDkDRc*UZBFKC7NMe|G7@1QF)+V-j5vYy?2=eG zYWEuNp4;jGotj!dRQB9Y*cMTvEq|{|JTfqY?@$ZQWx^tQWq1BXa4$wga1u4;!jgxg zxw#nt`)!Q&ezEIFeES-Mv@X+TU(eCvXLY*_z1U6+TCTigqt)yq-oKW{Q#{`!YC!pF zfM%_!IRxbjy4N}J16Acr(e&jDlSg;YHvq7k@oM7lr?9`OPd20%TZ3T@FQoBj9@{%{4FMI+K*joH={q1xR+=$4zPsHk-Ew#72Y zDT_XP{+w>Qxum!ls8L&)-=6`}2x(GMQpEez)OWK9DF@i^h5^R^*)jpH0-&c77c&@2 z^}^(TZ|!HA1Q4KSrHcj_u#^6@{0uAcK3h*u4+8OE$Y!P%Alh9voj0B4RHV=;nAI{a zU{fs=W5&IG^G04p<=I0L;eJ|S?~1G}MDdE1Hb-bOVRUtk&W9c5VvSMgq}arYN=mRU zQW&AM{KGG)T;AKvof!ar>|JE+BF^igkw(PO#kVr6EjNBvMI zvJEu&0NrYbLbLQ+?23W~E}O%}%4!E{5*h|xMQv@juHB>is2?y26ET7TE0Ns28*s7P z{pr)E>e%tp${W!pwYqOHEFt09v?;Os9zXjgLEZZk=m`}F_my8VL0NTm{9aziFcFJ1 zywP+wXtn1I2oW#E?HQ0(^YZfQSj-(A1OE0TG-P%gcSC#Wv3z1^IK^%Ki$F3`c5%5c zwbPudp$^-ac?;d{02I(@EsP@V2Cr=sAkSff0cSjl_&7Men>FK-Qc?guES#O45#+#4 zesX+OF7DI)`0*oWAFv82Z~!BQQheZ=G-|1iE?7nKOg{_^K72d2fi6^G)!?ewi4jT1dgOP6ggpM zx0^O@Jzx$XFRinA7aoQbs5HR$P_p!^UVxgBG*w;A!%ot_4_E;JM@B{l^mO1-O$dr1BPs*oAMU3` zl^~@&9~XQ1PY*yM-*~xcjM_(!rc)ppiz+;hj7SnPvhiJBTEVACtSD<5lEOcNent`eyG)#Z6kbG(ZXTGjgE;&7F9|H;Y8#l^+y>MAHfH+sv($*xT`pXa)^ zn3xzyOBNQx1!|d49&&SYvm{-C`2%1Bf#XIF&ImZt2i6_ENP^P|`Y{_Do4dPvPEHO8 zIF^o%%^=KhS#+5dVbj6d`1bAF;NT$ecThBFKz>jYyYx8v_Zup-Kd95NzY>2~f>K=X zv8Gv~QwI1M+6m}eFuuo|Q}CAcSy@@Y!=a;wNzQL>HhZiM!4?8-!PncHk%<7X!LI0n zHSWww4xK9S@^Bnq^hCT&hr{p-I};NVCnx9q`}ajeU?Q3TtlhhJ4|aiXb4y)aU2d)= zs9NBMKqHaHeVH@3y12;gGS><(-PR_Vp!j!jvC3|=7{niif?*KWHpa_w4HtnK4t4}D 
z*1_K(^#O>Un3=hXQxp9S=nU`!*<+)dl|A=LzF-&t;1)obmX;Q1Fi?{~^N0V(yhAln zWgP?a4MM9R^DPXFnL5`S#p{G|tKoHc_!aD1P<%j#!p6dqS5ksGfk)iWzn=ao>*Bk5 za^BSl`_baNbc`Gv+fc{&D?ok;zkNjSA;Y61T{WL0Pm-^0WRwOPhKfFz zB04&x6r=ahqpP9tXlQ7R{AZf1^Ghanbbt6h*VIsp_*Tm^N^)pt-*0+Kj`#HG zQ=py`=IXm!>+6cem)GiKeXeQmZXURSfd+QH3G$5KtF`Oe3@VdYcrYz5ETE41E*ji+ z<|Nb}RnJs(AUMG>F$;h6=#gu<47Gtm%{ms7VxFb})d5la!oVYpY*wW2;9%;AuI7pB zpg<(S6CkM|gjjwjNLRJCvg*WsB?wB+2J9r+sV8x$|E&i+VW+nHSIOV@FS+_*inYH+ z`M9Bh0R{$!WQ_#nBO#T08}9@4v#}NW#%zX0M1+M03I}szaoOZ15(}T4oABAvGkf~Y zmMhRal{7(18vAHAsTa1{GS-meJbI=@-7i(+Nxey8!}g8E+|_mW6*}(vec}H7!Vp?P zkHWAp8I?mkmG@&s>M}8TIhi=OUR1Sd#@Xdy^)fP9s!-mITfj;mns+j;-r=Ha4`U&g z7k+e&11#-u`%y*S=DnaBiayg6r+tUXuW#8&SM6(ho>p{A+Nd0 zcnN>#N_k&v>}l6})J=VfSw=w!i->I3Oc}}99V*N&sZNjAFl*Mba5<`}ml0rmd#OE* zonkb?qxOmSwdaGwfy{iFccd?0z68FqF;Te)%obXKPe%XGpYPtj6%4w9TDdqIccOMd$<>{{=&oN}Pnap8)}J9f-q-nO;3-^D?1-~kBr zdt%~Xd-i&;fC_)wQKWEwaIt?WU_Uc8)o*Qx=}+GRKln7got=ON9xu_T@9Mtpe_$*vjtSyxU`KOGA zIB`{BrLrZjuqIaQ0>gU|LHR%5J!35IyVfmwmml|!UwZj9h~K&+pBs)pbY1Y@l^7c3 zd><~cqj^C7tEiwZ`CF+|>f1SFnIBQF#4v7tG%s3VQZA<}?B;AbS77dt>(DJ}TVv4R z^m-kQ+>Ugfg>~-Qg0oGG99gDQCvs=>D1mo7YfO-xy%eMg@)#6+)$D#VLCIg_=-t`s zO3HCTwm7`|QqDN;Hs*Sdl@r6Ck(dmx*lIEd4M{%l&^xmS0tP}DX!CXJ?Iw|<1!EVr zca6$^6lfNDjNv9xM(*Gv;>upke?nHCRg|T84P+OCmjNn9z%N@%rgbdwN6jE7L%WwX z#!yynwCPcI%x2;Fns*vvr>YtH^wXK;UF9f~f!v)Gry4Bo2;q)#m97=&p_W!w8pGoL zgWtYAzdSqiUSg~Hn%m=;Ge}NK$}F6PlV{;WJCgKjUg8_pH6km%1PnR58bU%s=v_8H z`uCeaF9rc!QWCXU3v4kEV!#8cj@NleB99$OsHvy-#u*^R8uSYJIW&wG@ zz{Jc9wtnlh>>{Wk;59vBWtG(b`}eP4`eTq(ex(aZ67Lok7J{Ax9NLyA1w;?fi3_!h z0Z8GJJq9lWlUrX?(+%1?V9AV?pFKTbgZ!6~aR&ic2M>6y??dnkp!N;T^=ACZ`1_q$X)9-?lbED?KumB~wKALg{hZ**v zTntT3Rh1#ITyU7YGoagvc<*Fs7Tw7GKxe^h+&G@`bMA_~hjzmJcbn;W#?#19|j%4{l?&VVnZ@HuCs-aMv?vP=a>++IbFrZ%8GHD!ei-sY;SKb3>Ax3{u~N9>@NmnZwfDN9*Fe5 zAbmf~15OA3F(Dr&C@R~5O8~_X`5xr8OBhLjYv6vMX557=g%Qs(FFD@W*cc3~2q-#m zd$2JZa{HJZ>WZJWj6BL0PcBN4WxLE=#FgIVwk_d%u69+Vm znTd%dq36cRT(!Z|;PE?hplXJI1)sx&%i;PnQ&UsCAO1)G^yTEblai7kKSCuIXha{I 
z)kTQ_+`9empM$e=W^%HCRS$`UCu1)>2fh^{O+-z7*$0Z_>E90k6X|2W4-XR~;uV>~ z^`_ufFhVfDB|lYfUY3hF05lBS2_hwcWdR}D0s{(bw4fr7lC9M9U+BPw6*V%Vow1^p z#yF?c}H{vqJpsoSRce(R#X-Qi{gA3Fp z7@M<`leu|I=v2^a;GbRXKe8vii4pL+uqnR6`pK4h-Ln5n2%6YqgXlpYjD8?!Qn+76&4VLr=cKtmdBT_X6kV!bA>TZVH z?(FP@A~Fe?CS~TFY(E=o9?S3VMf{F{SAK|(AE?{VEaIW3M@2aOsmmU;DJ?Ap(qtNV z`uE`A02uZHilJiO6DFoSz~vx^180QSgIaU~LNuTX7_l$DhwCFFBj9pfo~~pfKnaC8 z3BkO=Qc(S{(DuEU=P~+`G_04ygM$u&X<&meKQx?X2;sI$RJ_A)QW%vEDff{sRJy=V z9ezK7B?NUIbbHVudSShdjU9cDWdP;}(hYymRTC^3sI%^{al!C?*SP@vXbsdU*pi;$ z-ir8wWKhtbE;J5x2?`oCR0w(qKYe;*=z^D~S#$u~6O>+XBF;h01_OpGGN!Rn=+if_ zbwDrzdIv-I5?~g15kPC9Fr5DH0S&#+j1#J2K0E`;_ZcW|@rr+;9yc{N136z`UIs(H z0cwx%AQumh$3jO0%-bal6SVUTusLAYoSvK*K{Wi3Ct2a z6=$%O;Qs%*`2eAli=yy6*_p5Y-vbJL|L@|8~jA_HH&s!WU?QyV@$;><_V_XvEKGL3K`cd zA{ot0l0p=gmd5}5OV0Dw$1sJ`;kKzMR*_N+%C{FP__|#ZL`TPkRA4Et?HqzKNhdLA_1jEQDWhun-V7LdXWYUlW zp1F~DRaElI?iD9!@;(~g%fqW)@+B!{|;*WvmoOoWi2 zpdAlIvT_%;8Byf>d=?Tpv^evg6XF0|yAFvgzWEu^r;>|e*&b#TLi?dAYd1mAi5nPI zWh8tHmFC-QjwpwyxvBE47JJFrga_I_cu}bTHz4$pFE7Na)akLTp4m%t&ZEVUt4Fki z;l2>&pQ9W5!8|E#G^DkXnUb6T1PvRDZ;-J_+@c#a*q?g)0Kjde;Td_<{2SOe* zGcSW{11e!OFie-(B&@WqtS+Qc;XqG+|1}6sAm8kmiO+`+%i{y10wE1@mcNp@yS;vpvsJBm@XVM(2#9S*TgAiuJ{XIP(Y!ObjV~6q$6yEJ>>~GHXT8&PT%+>>?43du7 zXHZAiDUK(u01-fCBH;#z55oGLBae{+a+L-;VBmB`?C8OX@ zL257)x*gOfp#Ax3nKJ+-Kx)PMJA`uzUHMmrh!B7W(0^`XBZ7hyN$LI!2nPQEiEeR_ zsUoE5h5g<*Rb-0@*T~4_kUX!$t<-~TSc0MzSVU_t1slE%j`GgPl@k$=w=QxcvPSLJ zXlN!Iq&y3;ee>x9^75tY&Ua)z#Yw^5J@!P!Jnw&}ZwMK} z?@;56zL2z82~EDK0+3A7W50ugV*pr>Vx3#ql{3#cKtj$et%U}XIzZxln*%{W{+N#7odp323K2>xi2Q(z z<1#b1S6BI+XM}qcB;^5yf{GXfG8Ld9cD)J&0#ZUy&EBS6z(Bzq<*UF9LyQi<9IaTO zzmd@&(ERBc7(h&f_KSW%FJx$z1pk^d>?LHmJP?Ev2#{8ekw9SxmYE}BZe z!NV59RW3BqvfpwAqp8Qjm6<7py*UwJgm4H5)B}zHA^2uO5C{K$?*kG(+}*tg883)( zx=3no%)iBpxf`=;GL;)VJPlDm#0hj4P@+IByGiv(eupv*OBd@VZqswqA?UzzSJ~>wOM7CJYDyaf$wY+MX)>0s7OtAhWqQHwFhp4MB0; z0xcdSHHhCpD2|+_Fr1L3!`#)zrm(8Y9amHHxamJtj{2s4M+qw;s@}}py#aGnGHQo-6CHg8y1Iyn2q$gZ1M+veoowY7T?GD=T}h?1D+`UrR- 
zrN9$fgHgadOZZ{QOvI(1p!m$d;H^G7!qCw0z-@HRqs?Kqgb0Z_Nk&R)esp8Q)cjwz zQb;IuLU0FM%a-NQvm^?gqRuGd+C?db zJf`}uU&SyyQ4k}}DB=*zhSK=qk>SxF_kODvFEoGQw#!>uS_(t@4#5eR2p8__<5M(h zS8oJac4K2>EiEnBfC3Qr1A+TyMxjlCfe3kdz&=12NfmQt{~-nddBY+A#+1z-B@4>4 zc;Ogt+qA3O+}3>hIw2vT&0I~V*%lF4`85X3;DjOu);u5&3Xr9BD`Gw(lx35K9HNYX zWg5M zkUs><2%sh{Ee*D73?<($2#NqFb#ii2FIzg^-xu;&9e`B;Lkf-)gdc8~>1+Vlviw1m zu>!6x=TeqNK;!!?z=y*d2;aUihaQnk{hV2dh8Xq6uym}Bw8u0Egau~O6-9kr4 zX$2J%;BqC{DNu#-^YQ{9xe41t4Aw1lR8dHJLg9ehVp2*(;5rRrwc4KHFtNwjYh`C0RaKfw}Gp^9S#Hs z5f(4R4=l)=1_sotLu4eVx|tXm;RYaGaSIc3QIH-~d9co4`$AxZg@uJ$(4)?MMHK=| z@7^ha;{SpJSaL;01prDI8w8LTA{rW1riS`@Y^gpn5)x?{85o2b+u`uVI?&``+kCft zA}00~jG>+$MQ7(5$)EU}#sIi6b~Ofhnjj0BIj|!*DWU)qKsN>x50(T*C>=Cb(B9qXr^8^{V11Qo@G;{8sW5J8udGr zb}^DxIrkxsP;Y+at*)d;&rgo_H%0r+546aAH7bz7Fz7iw&=BjFs>M+=4`Y#*45+W& z>HKV4u~SG0>!OFr)+PvOiH;!2ynW#dUITQOYa*2>f+Ctt^Zn)kn_FIRxvk`DaA0AIQATGQiqjvMTE4Osq9IJ+Qh zNSC_J=XOT^YYC0E%?(@`=Z-AdsI!LPdW%H+CHh78C-bcOmS-(Vm zB|jQ@szU6bcx{#?r0KmtAtEYqf|xdzvQC1D2UG(xD%itu?>Uxe9*8Gh+=37SMI&PV z>Z!=*R|FqN?QCs)qZbFdX{0*FQ<$e7*r%A~*We=>&TlL>;CrxC0jk}h6&0qZUx1=d z^~CZ!)D-X$VM;f)w@o31?DiKN27PsPacYg7S&-8p36}>;2%Na@{r#r(ZdR~lfyDb> zU7XI%&E2#mphu#x9e|FVAFM$%`vj7jK>fQqI+Rm*iXf>AHVqitIS|X>?ibz%ujUTr z(^7a%IMfjn8F^#gfOQG4NkK}QuTugW-PGrA0(2b6vC%OyS^_ls_3Ibtfz`FO)`R&e z53!`0Lx|Tve(*4z`fP>Ul^5~$Eow0EGtjM$*M_Y@ZAZQra9?f$=?h69tU!b>*&RaG zE?H(!0|}xVnFuHz6Y>H?AA}3Ax37?P1^g%6e}k-$?-k;hAL)Na61V}8gv|@PjzAJN z+H2=aFwDGrsOcE_`A?wEFe9t&M%Q5xfOZ3E3aCi{(@=u}qFP&8f?EnvT3jRng}}RP zg^d?kdcbcW^ofE~0nGq` zD>|$_sF!~rZ3d$9H6!xAAu>FTxgU44H>_MMPq$Ck?(O|oXrt>UzaiR88+eiplLbSX zXZ>aE(AP*~!$YuA4=lPAu=Va#mN*!hMOZqpA+U#VEy#f91s7+@Y2N>gAfv9xF=8*3sh3W3-~o%N6wAQ z$EPMHARJ&+=YoP*<>F-ylZo*5t^y%RwIGL!kt;Y?r=+H)=J4o9H1L97aomQ7T9+-!lhH!(AZiY6J}!iF+5yS!D*T| zRP9LlYKmZl8@Y1&dFV5;4oN!m2xkwcL$fkrh?R}IMCK={LHgM5-}PIBG}}T#LU17N zE37l{eJP@rZ`|qO{HSkcb&yBP%rpg`UpD+k#7%r`ygol43rCEw1bh!xAsJ#L2#LF! 
zAOn;Du=)peh3V+B>*~Cr9|ALueD{v+(Ic6!2e5A!q0b#1AFC-TQQk0}!5px70jo+# zpdi3a0GSBTR4GUpAcMh1#|Zt_)wK-WHdD~E7VdTZo@w1<}D~W5r(@z>%X7lv;Spt(rUIeIp)K7w20qKz~$lJuh*hhU6Hyb0nfa@ z6E?WVPzemrh|1FwGBKObKCv7dD|`1q*?VOuZ}HQo3hRCh?WjehJ|mHgjBMZeOA}4I zq1AC&Q&ErKJ!3tMWyeL23L2uf419{*jz3K^7l;eJxC#dWCT#YwN)Ge2ar@16`zpffmsbQ+p0Sx zbTU#>ScxKlCxB}Jz)ng^YIt}U5GzC#fcSMnVF11!j=T}C=_jWAhD6y3v>Zd6%E*{?)~Fe^kf! zZvj{wX;!Vx$Ngd4xCAHt`@)&=@k<*Uek;GIA@XljJ-*JSN;6>J{Njl7{TS$DpiUN{vv{~t;JvS$@o3Db{tc|+AY>%|e9t5tl*2I5sYu#;qCxIvXKEl#GVuDdD zoI)I(ym-|^F#VfZ>-!jTLyB@pkL#3l-WG0B^L(ML;dOcDS)@&I_imWo)8hSMyBcyS z3gU06NyHHKQ>JajB_7m%{I=0^BT=3aAmK-rfzQH%Z(3V|vv+eUBX{@@I=i~;G_|BA zYq*O?wSRa0v}93G_>GwMJ$!O+q37qjua`%t;&MeKg(k|3L~w+S&za`@mk`4M^E3Xo zpo+^--K)U7k6EvXT@m7u9c^9od*ZpiRleIT&7Nv&wA3CAAr}8~sO}gBMeslJ6<#2x z5TJNBbO-yZ`}n5?B=Y@C^X-HLqKI2NNo09@fBybFZ)^8DT9;v{UF-zyozL;?B8=<~ zp`=Th>YLb=P70Ml+93b4F~iVKbC#Dj{B5v;Ju$)N00i~t3lG{pS=x(S>9y-F}?<_Z_`i9wZoBbj|4Sy<92PwYbV`b%x@ zj5-RDTF2cAlm! z2%_7gBE0eZ&tK8U7D8+k2tOQnt!o5qrkzzuKtpNuugvgoQPHJr^sUekZVG|QiLBv} zJS*Sl^PQ=Da*-5P<}x=&%qaaT)iO2cV<&7z+^H~IBcz-3dS#^3o=_AFPkSQ=&2FJS z!A)*a>{s$eaIQx+z46-fTouI!xKior=sX3o&m&^)Z7@)WWoo2JL`I2=j$T>oRNq)fk`Vtr zS5vp_>B3~qMUP@(Hr3F^p@PDo;}mJp=qXXe-C&J^lF!v1ib{7k zof5ryHC`|c>(~fn5dV;2Ot)mRer84Jkx`cF9h!0pT;4A^7(p>| zf~s~lvG+3Qc}Uw9ng3L?Xo*HeobC@~)9RLhG;A~pPMx%U zObm<|>Z$&3bpF>@&!!t=psFe=M^bQ_?QKoZ2JH9MQau>jxJ^HJZNrhEl)lApJmK-Y zr-^0dD80(*#pcw^@j!!AyY+33ghbQ}=iB)B-gc+~3JKg7q$D z>)z%>OVgic{@VV!a&4`|5vwSF7;+nNpERPV`8G-$ik2wX#a9`NwFHHz0jFCW2)p!< zk!t6X%WNWJ=Um2$f|BbJ*+hUf5tyIyTe z)N)d}P+!~tKk_K*hKhv>dRw`RwF#f&QJbE3oYAlkFVYJF+LQZ(&r= zGUZpv=9QH57#t^6oSZ82In3R9`0U*T+JnQzjs(bU40_5|wMEngPlkl;y z*uoJI$Rk8YM}uxb%VmxPFjZU2m(<;ual{J(<4vj)3WwE`nyGa*x8#oQd)CI|maDfO>90f!D^^Z5mF3`|ZSt=iU$JSd_q61}z0KH$M+f zsgj?cA0kQ6la=mPzo(r`J$X(d!Dq0c5Ea$c4sbt^{UO0YM^N#*AY@W=zLVemK=1_O zstA{tFTqKM(}Z_H9zBI*ihoyR!mpk0HtdAX8G>{2_SV+pkTiv>04+nfp76#7R?Fmi zZKOQb->=3<1i6Z5IyzvIp#~d029S6=$vvr4s|23)70fHZCV*4nFdqpC9NE~><&Jn^ 
zLr~Gb|2|7=1C%96UVPB(2#APY^%Lx`uE%P_0c8+vkOUbkv7TVD6$)`?Cemni(GR;j zuTg&qcD^t}2Fa^G@%lFrdu7W>4%8C_9SH)-S*lj4d`M3s`C#@QB`DHfHZ=%}f7pe_ zX1)FJ6L8b}tAykfk8WnE+7(KQ{D;t_ZWgCEh81ZbF}^W?b&-{o`T?niXRg8-(% z7BO`N83;Jj(#rEv4Q+zV*W2v;Xn!A6mC*G#y;YBKe>av)$jhPxr~IM2{?@zfgb^wM zE)m~r87DZK35H-r(pNP!ct>p6oIyyo!@;-WR3W>6y$vZ$qXjl|@1xImAkGx?oJHc0 zL{GU`6#T4zY4ruDkvlQB7@vL#w3Gz$)_PIejtf-+;A7Jf%);(23O9Zga|-S}@AH>6 z|9gF!>Hvlx07=5a&_BKCn?Bg(7lD&vX$ENGFzGw_^?w(Z+Q!FqAAf)&3kLpX8QlN( zTOP{xAkCQoh)nUchxGg0Gg6X`(-9s3$ z)-hTV5~Ou!sv!@x5{NsIWZy5ktGHh9P(bED5zdMq@9m-HSNbWoVqMFN?^A-pIWjev zuxUX)U(|+&fL%AVz3UJ;-$uv7LsERal-#vti^6ch5HdMMH{0uI@!*!5(EaR;#qZ9B{&npY;zO$v$F`1yg;EUT|@ zDi!BwVZWA!gZXkBJ9{?~LY@)1MqiLmx1{rQ) zRnE3+{OZe5ss|f3DHCJOO-=vvXkuN~Nkh`#?1)mML85=l6AYp-NewU4P%==rz&Iw( z*DA5OP$(Y<8M!wX2=fn1?^!d>LT%Q6;X8rT44iZwAYp=>2sOmztLPnCP}ub#pY+$Q zUXT3+!^Pt3-)J7%di#aFx7O!EKjv?6pzznFUB8?h! zg}OXIO%#PgOGVnngEk<{0Xq+Md&W8n=d<_s_dzO3Db;O2<>p|6M_3AgZ5&b(>kRvka^nBees5G1;R*@A+x zo{iLcNK%*IylqE{fJhRAVQ=^%NDC${{`xS(Y>?ThsBr0gdl#f0FBccU_k}_wCpy5Q z2jiQ!e0bl2UcWV3G7i52AEJ^8AH}i_hh9Obg+tX#AR|G(z9YLXI@jd_|IVFHsc?`U zLWV|A4d55xu=(4M_h`h(=j}t_Kmf3Ph!v8MkZ4~Etf2KJK$U6m+<=2ooOzloM*VOM z38?!G|2H%o*P>4_?8gIx84+AkQc_`qB!JbQtUT7_<6duM;)NsmjJC+j%mI!{E-!cO zgX=Gn`a_JR0{ht`@ZxNQL!YOv2PWC&t`7tz2NQf8UgHditBh}G8MU^ak~SBSvAF&? zz7a-!`_Q2Pe?*QaT?@WEdnSs}&KT|&617uyV9*UfCAj&RlCD7R*Xk5>a$e3!wr@iMsQULop@7B<3zqyn zXe)4p7@Q>^Sg;PhvxTi#vu3TfH|L(f&`5s&egMESEVi3i&g<7VpF0D|cL`>mQuPKG zf(YsVDCT@j=ws8wg5a}bK1Fl0(o}wjPJWX;IX;H&);EwXl>-N7{);Iu--ZJv80;M? 
z7O1I1e0Y(vIY95=a1uxu3JUOH3+U8i{L%1%BFREtPSVo1GIl3}TM08(PC*phfHPe# zUJsRSXlN9C5(FeX`n2sbR&IU-{VQE8Xr{e=5gbX{fP+B@zCD2t9V&d(Ce|K@&fwpU zuuV)(KAnF}`^9Du^{yvB;%%on*yZ3Ea)1=~4I6*76jXF5{p(|8sX8Tj46Jn0j6_zJ zmPyY7Tp*lDX5yU<2uLt}3k1@@t@@R~^G!ayfBo?wQMu3d`MBVe1^IX7~_W9)w>q}Q~(qJ&JG-LsIA{Ov$tm>eFvWpRYy@+3!iFI$2&1c zw7V(<#vOcS1=L|9mATw@GtL_W7BWmPkj~m>T4&)X)30Hvg5h;z*M#BP{waMrycHjD z=g*SxJv(Z%7ERy*5S^ow3dcH(7w(614Pe2#f4B0s;cVl>LoQo+>rknBtw}ugX0F!f z4knXyNVNOn_r5|xH1t1H77z&@91ZUp3H}qbE@)&mw$CUs)-z6i1kBLx8eqsfJ z_A&#Fl~Dgaw3z8?TP--{qgnu{;SvsoGX4kt8MF{ap9^{sHf{J-?ryH@ zJio&+?EB|P`}w~m6E<$s{TH2WZ>sGAM~=KMDJjX%N6!eViF;3luiu||w}UXe+QbJ9 z;T9<}>voR6>)`N2kr$T&9xky|uog7qhDMS!&vNYy1VyA2&BADv+twgC5J@h&2cTAz z0{}50U6^adYglDnPns%1sS#EcH>_c(unG$c;VVZ$a3|tPZ*QW_cPvn$B{ePWh)%vC zjstvc{B`i8sHiAfT-kZmbxp0jf6!Pb#0>bTkidv)Y#*S&S5aP$O5QV_ANU_o z9tP&?>(o@?n%g)TP7cokwbB(K%V}gvFOQq_2!{rxb-}#ZnkD=)lan#%c!Iv4KSV9{ zr01pC*>h;cCb~|a^&4NomVqj!HHr*{N2XA7dp2vhYSq%EY{z_{-<9+klSicBX zB{|@Y>2HS!9KnVl)IjE^HD#nI(N`^91Y^QH7| z@lw-o8VH`s-PV4fu$y#7O7AsVJwK;A&vHMKkdy>1!~O{?33}Y0=(%GX@7qoWDxCo# zrkkd3jlH>#;p1z43S+!zEJO3Ym(g$Dya8?ig;2;WgPDckV5GnMrRq2*j%r`uw^)gO zbbEiG_44=cUqE`0VZqw60`v(9nkuA1d-Hsi+)2 za^ya~21J7gdM2Ki_3fSAb{uQN<|dDigp`!OukSy|C?N)cRs&ZTq&9(JVMIpI@ww9I z-1s^nFa}hKFr9@e7^B3Cle#bOx(Cz=sLvkT(wyehfIm_NQy8c}$Qw{Iq@rW7!DhUa zeVI0SRaA^BpE%@l?hLuF4&sm_L_LIo)QPO8VUX?UJV`c>kTs&&j9eB+arMQlLxXJ~rwGQHKtEJbM6`{Jm709-of%3_nhP-T8OX&+x6-L@awo>{xVB2=!bTke^ zU9sf3QoEf_EPi)+uwiQ^ia5}Y#8`KXO91Qjj7{8|eFj128pYW~YFM8gnAM)X-Rf%)9)Z)6}fLP{4XZ-;&uibv?fdDm(f7~IDh z1bxl(;tzY-QM1_|0wu#LEI~g5#Cu|m^7-wZKgf!YV%Or@3$>sg5R!6U>1b%CXzZ%&g3!|H!dV<)fmbsZj=c6;47spWmrWezS-oJ$`Wx`ihsn(=pHx zw@^W@>ht5=j}!$W;bo4UL$)qM~-FK~C<9L$HMdXPk9tf-HUt((ki4XX~)d5u2Sa^-DiJ`f}*YmoHE&{(nD> zR!$K*P-w*T`r)DTZgWdY)tdQY{bmper9)5h>Q}opXr9_8qSVCq1n&`sy(^bsjC5Z6 zjwls~Mce=P(A<8FF9NogOx($Gr}tJ?!x(}tk)(X`3s@1b2TIG!wJtgdl%pPo<7RT# zDZ7bxxIuuvkJ^M~P`j$ZX_Eu$RnN7XPBvx6@Ja1aPr z|0b|1kP@Rs>ijykqM|}AeR*{ik&fw~zWgLjcbn;sM7%l#Z7L9#krhDqfdmVxcQLUi 
zx;^+E$`yFKN4U6Zo14vWP~r3^8u)-s;#TzgqK&+qFCyBybXTA}EVi^;w<@Ws#^G3j zg%}c*j(6>DAC8p@P0h@d;X7Wr(vP4(Ena#Jaq*Q*ZRz&H{l67xeqCUtk_ZC^i3rI^ zjBISuaJr&)k7GSPp5oT!P8_9B$dNj5dBBk-f>;>GsB0Cwq?yc&4`zZi!l&VUZUX@3J;z;g18yA{wod+_~5*VnH=6a>vXp&{Z_K6Tl#R&I!9R zba5poy6R0d%0ol9voej{zNw(#2gC82z8?!a(@Tash8ll$Td+}-Y%P%S~ms#-VK zW{+q2f*}<=GYKgvyvNQ})^2l%td(;1ie5tx6b0=z?VGrHktXf*35N&>wp*+(Looj62r{Vca7)qoP$vEdMR|5H#1%m_8sVZ?eV9t;)hxH!pX(RvDliQLKkz zMMQJQMA*25ubk0>Z0E^1wd~&66;~WQ=!R%ziZnjeKVot;Yu|2!sZ`qbuW<)K_m)Vz zb;Xb{W~1oc#zam2A1DQfJkHgQtUZJM2*QSkdePZWpdQrHRT&_5tJ!z;lJ0P z{V!4onFW0j;sn@T`$ipcZHr8+dgrCumb^juUfRy^!0;nWLjjgNTT`?#2YYZgRQh@) z2)oBeYQ6WBr|o5@#fGLGFw5%&7bLRft6h<{nNDKQT4h*n!@#on?Q6?L_(fh`tUqE< zN6~GGVQe2rQsp*3rPfKuK#gO{t9j;;9C?qlPS?w%xwVVlCL;z(d#Cp8{wO0&adsCu z&;Nygm*R*|G~o2W&R?)Q!eX`Mzwl zh-Z{DYwvhgOVe)SVZ)Z*Klk>=QjB=EveLwrJoZ<`jz3nc0L{vp=^p-4G}B}|o(4sX zzIScSvWj3&My(7({l7CbI|3U>i|wb+CnpbDSx&Nqj$hO|S!B6p=H~X|VLSi6<7{e5 zCsU#lBIg?qyK*Shy*A@ChZHIbAqHA(;f;&IXWK}mw5d$JCW0;H!n;* z8r?wAB?Fky+;hPs-#P={g8TwIZuunblR@%NZ@lX2KGsnux0G`1osEY`>VD$4j!L}h z%m4yL&|}ldr2S5U*i7=uT$ICincR$w{8EX_nML=yTyB=4D7ANXOV~J#*B%3wL~Xs# zU!(@B=dsBu?*AJy?QMahIX$K5QS;53zQs^>yyI2su4m(h@4_EB^K)|2kQBn)s5UK! 
ztCJTm_$h2t_?=GMlNQHF&c?_y>Kw+hS!l`cvnXv%&%hk>qSnxJQ?hx{howy@Cea8j zPBAaLz$q6w8sRZoF1B{A?!`38gXg1FX4-70d8vP@H~u0j*Xp~%HQ~bFxLxU;eAd#z zQlsKU(GxMiOtl3dJ*fCOeZ$DiC~e2aksIR-AHUO_WNlm{Iy~vLis=m#4Z7FcW{;vXpHs|0IzmkTcfdSKq(& z8WUIU51BcgR@zAgLD!Y}zi;mjzZ$Jt z^4_swM&6aFYPv7Mf#aI5+NGqO7v%LqpE4%#oQ&4$GuB}s6-wlTToZb^0v(WR@1xY8 zi@wdcyzC<_bq9OtA-ibGD$tO5`ZHZ+g@{@L<<-mMSr%yVVHxy_FQ<2sF~kN?kVd?C z*!af+9%Tqznv4+K8Jw>6>_t&NDRS#i*E-t|`f3H6v_pT+eQI{TjxGI_ZB118|S7v6` zh*(t#e1@dV1&RuPItIZY61Noqg4Vz*z& z?#T6u%X)f$QnLMt)$S2vD8@=QGO-acdapjmGRJv`@Y8^EHy-H-JQe)9g?7tTkD;xG zxjPJancHJz23xv-3IApq@;jkc#A-|)f^-3N3&gy}CGkruBCZ5nx#M)l_KvOQ9Zjh7-7sx{(nZw&tB#+BW+GXB>WULucR z_V6E@E`KG#uP<{8osTlyHr`Kd!!IOsfQP5Z{crK}uaOrc>uFQ- zb@yb|Qs+J{Z@IoB`hUm9_B^zfykl%VKGfpAdHwI*^{Qn1=N}Z3U)5gg6KOL(|8OWM zvtQfu1cw!o0S@Aeg;4PbPcm${i=27FC?;WA z+P}!{pJcdcNYXfY@E_CtVYfeR3)d?4xk;*C`~tD}#84HrTFjFx+BZ$C+)dk9EmG|q z`g3<(cXwZQEJmiw|44~#fbC_@%X2c^oHd{98e}Jw%I;a* z)lF*EPa-4LG&ZJ1dwcgZH}}@b$p`qx#c5{m2Jij&!FOGCUt?{(GwbVN$RH{Xso`e#P?mr8q+8|DJq z36e_*dIm>^?@hdG`>131;aDg;u{=D#7*_fJ40|7RF)~|3|1b$R{>7PCidg^Z<4iS& zd8FwFt=pq_O?;y>7aznHrPt2-bTQr4@f%HDoSdHm?Jt*jzLDQ93)5xU)VX>(n|s1o z>kSomcdh%Z4_Y`;JhSz=jv|X~U?6AIu5aWL9~5bHbY6ZbUr}n^t>M+3;{6mBprj|r$o7C(3N*W6FSQlcQ$Jlnw|t$*!I_PMLBBNFlBvN`Mn zdDnq^`|qc?$EP#MtEk=gC~&wwvk|p=UB{X0dqU-I_bb!wvkxoprWEBf)kyP5lSoWV z%+-II`K9RDOaXgs*)Np_OUj?5a~@X18}C0Ci@svBF>*5p+s0Xh^{HRXp82OlGwS?7WvXaf_k>I3$d=Pf zmNOd&yElwHRXv)9rP!WJvNK2x6}$IW)>Zxp`VqhX-krgcmC!xI&~r5=4zbw(iA^-Jkn}uAV404I5w7`G}H)zwe-SrqbAW za$`tzvD5t6RQz*~g#oVAt{c5s*Z0ovJN@h_96v9WRh9p_cYgo6uVs0&S@@%dMt&-% z$G!VAeqA3OU+wF+q$F`}&nr-{TF*>Q{}35p(K~s0xg_SivqAu0u=d$YWmR^e|3W?1 z*UlGD_%W%{s0|u@AEGca`cY7jXt!3BeP!_4wc+ynRleGP2BMFBiW2s>WouHg*e40U zW{vETw+&4J#bcdT=NYVz=_hvwxEvK|zs6Tl>F_62ee@!oI8wKn<{9Rv%urj;@1GBK zO};01H+d){|FZVPW!_vRO40|q*TtnW?^;!C?PllZzUb!Ai?KSgv)?~?Ej2WB{Ofy* zuCB)bWqhFeFRB7cUuW`?l5``bNC?QqM=bZAuGePr5; z_7iRDxm4H4_+7Soq0BTaKdCHE%#AMlQ9PTml%yx6sklfTeyMxzT%tkVR|#Z^@LKEW zbmR@&U#p-ZD=b3zkuc?D#H&CG2z_|%u4SB7B9krm4&j~4H+Bd=4!r4`RrggY%G>32 
zrcK@Y_tTpOADwEu`U>=d8!0?nwVt~3{B3{hK~DTGa?)I%(1(_wN6m*vPur8?8E=@G z!Gav59QA>0nFw>RUMZr#eVZ6~`zBZ|Z`aN*hGIGwa$H}3rzJJe6(vvA54Vo_$bu~Qn^mrl8=pk6?Rx}+rCG~- z=EnGqq^2febwrZtmnziaVg(@6~ z&<%LYj2g$zKq#VfRBj7B(y?QkcF-#fA~ZFJ{!vg&E!J+-4t*H%=cbRGX(zd~rqIt>O3mak9U@ereK9+(WfP7lscEO4{(xnm&oFip!5ZNq%1bqS;Tge!mo^M>rO)s0mXgUkwqFZX;Ayh0 zAv?M;RbtH7DR#!YR@U8zlKC$t;vitZZMXNv|2~h3v`&Vd8YTB1;b)%??WgYA+@3Bi zyWPZbdG}uPkO}TbW%9?w4il=QNsCXDL!Zj8{Ba(B+4U)Ha2NGe+g^HBu@;q?ZD%h= z*YG}5@vi5!q_0{VHu`>d*jlG-@(63ado9iNZRm3FXdKvSAKfuBpo1E;^J|nl;?(^2 z+@8Hd*GpabJSDGxZt425=%%E;`j_X7ONBo^**Nmz!-Z6>wm7L1(2}DOi-M04)P9_& zqU91L;Fv=P4}U=nH?!7)@?6g4hpgf)ASIxYjzSs(Z@J9YW7O3CD5f<+N8Z!~-Ffs7 zET$+jD#A_*tUhQ5?qP0eDOGu<=F0>+0tG#3P(k_qiW=&-=2^I*yDCP9GS?&@e>li#m!uV6jN} zeS13;m(uM3FypT=_fQ3(jnlhl;NBL9#3pv+J^19_^-i&S*cnjeu`s>s?1W=qBywY% z`hWGBtM!`v1D#J(GNuR zPHtsvy*kN4b<<0J{2vNvU{vy)KFzU^KDd5e<*)0W&{`F=wKo&P7#}&RN0PlrNtu8z zWQ7TXh;o7_^u#AtcgI0+fW~wwhlly45B;_r3ng-$GY-)EUB(&uMxu=G==@x?jJG5L zYCkVjy_8bb<;B{U#qVV_A;E)Iif6u?XyDd*yc$cOj2}6&C)j$q$7@HtS0e5IvA?g? 
zA6xhE6Dg%RCEUQv8@;$5viWu!@C>T~oR^nfNMVdTTt4H@av?v{W7<#KqEIAF*1_Yj z_~3D^O8Q;S=10!l20s2%G?LakU+o*bzeL9oKSib+x~KS*I_1prx(ELz4aI%2ov$f) zr0aKiHFx;i=Dj$%NPob{n=ZteQpeKG(pa9f zZXY?&&f4BrIIP+Ib(%*>lvgXWIP#L6lG5|O9LX>s1XAL*+=i?oB?YR0((rIKP0jww zO1&)YtjCY#)Z3qu+=7|oEqzQ(yJQ~&Nh)>f6nK3flu{p4%_mO%svTVS_h;n?5M7a1 zuj zeXWvk|C2c0G;e8jYH;vjg5<7=?+IG^mA3U%ZXq&n%zHWJ`r;sIVv}-O81@jJ=`?n; zwS~g*v6k6z&hvE`Xu5tTNT7_$BDQmK{Lz6*XUimCqb!bdMesSpO@o>#!DoUY4*Qa5 zWAyj;_w`+Vw|Rc==FuyAc#eeqWLx_a-fzFvF09&BJGnDtmDO6P(14?QNU&!cWw*7P z3?E-(U)PWJ@g~PZhm8HUQ^?V|{Gmn09~vgp$I!jcZ@MC^+Q<(0P zs6Byi<&!gO)RfY*MOk|VBH2l->dpoVzlqsn5Wi^eSaIh4{P80M@zUK%Ai>D+H%gye z6bVHCJx(nh2p88Xy`1|2XIgI?Qe zd>eRq!Z*?RSV@@J7x;)81-F&`1yT-HpDv|d3Y6Hvh+~Vl;8_ogRe=*_$cye>`W9@G zu0;pA65AI?8-s#2vwBP^qYn!mTh_WcF?Y8uCxo;t5*-ldmH6(z{br zQs6e(|FFrW)B#@(G-IvNL|5k#1}6@Xi{U{&abguV|KDh2x1q5Lu!}fI=n$o~@3`Mr zsvG37`_gBD`0*u|9RUKpk-N&(Z06>v9vnJhK*w=XJMJ<0?%mvvrag|n&dw_L^?N}qTd1hYp`n$K!29`Q z@?+v_gpj?S)8c|gY7RL$ThmOatCVuZc9Rpo@BJ;IzhfoB5?=p%z1N~YXklPR($F)p zlCAqF3tgq0?LtfBztPcCe}2}UOmDwl?Iy~fo*H$9I_EVe{z)Wvaz^MHyr~}k@Zn?C zz4Q3jo#H{aqaSW-;LaT|11DHV`67+#=yfb9o&exNX4hlu~ZME2zc`o&}7T4wJkducLB}Wt-*}e`9?kCCLeOR;ASI*3s z;^|X;-MzvQWL$6JWY*SC*=Fezf57Zf28(P|KCg;ZMxzbCJ z7VTHFOZ9D!FT9KxIFu^(wyh;ch@0pO1KvYOImu$>u`RIE>2P<8UL7gcrT?z4mbC}RZkh>Rcy{)7WuS=Cc zECtENe5R<~b=dFuKAG5+QR??H&h}l;AC>SdFaM}`IGr1Q$h`Wdg~{E5l$&e(36-mT zS1VS(Ti-UB8vj~svAJwf{<%2Pe$owGtn{;fDuRbwtL>*0 z@(fNzDw(+#{?VyXduSyh-NDtu@JN(KsH&}Ohhg=D9--X6qw7a+z5o@XnoDz&O0Ru+ zVy6CK*~;^)Ps1m~+sw=jq#f87uYFU}Fjn^*kxn3yk+XU9&&rxkY!27$qvf6^Q;jt- z{gd&Ensc*F(36o|nt`o9Al3xuI$e<$!YOmH7#Mzg|3v#*Y`Ff>?0f zkD<*@$bIYFiu^yjp0a;Z66R!J(X!$(bQ2JhL7(&OSAKD0?Rqwa~OmT@3|G0@OJghi>Ut3z8?Ihyb+G^Y35W=j9F?NtWqnOes% zhHU4qU9hnI8h?12+_PC>@T_OW3HBwI#GO+H?I(!4eZ7iv9RdlO6Xj-CnFPW!1P`8#1SlZ$MTKcex>ttDv$uYg;>1=`j2s5Zxr&^ORC z#=8BGet3~ej%IT2u3DyyHK9v;o)4VZo9PNP9l#mK-7<`mBR!kDq-9qES9NrZGR@Q$ zh0Ha*zjqCcU9F3w_pT)bBQOhVY5hp~Qnx86BIG__b6E$tvxsIWKyO=EU@Kp>Tt!%W zK}l)$<14txPYpGFCi?d%|J$ 
z0*Yto#p>%37oNsN`=4mt`;G)pvQGg?_AoO~?Gb~@cea@QV~xoKE-q!bOmH>gKbo4E zeS*=06c7>uJ<$S`nsSMyZfgZt6)=fH@`f>4Xt5)0kqCp*?y*)Tl=XL=oYE|(T&k0j z#t>+t|BLBuANB)SiK$t5IewEzrQ4et8YZTuO2A8i77n5?C<6{2dba+Dhj3eA7gA|5oM zgGIaxf(d;A9EVKUegH>doF$$+sH+ATyv5bf)~3q4)954=K9KvL-@usycmoz`DKsCP z`7JFFF@vKH{uD@0ozG%(m!ZZt4jYG%Z>lwV7bBymvOd&XP$!T`ToVu#vqNN$QEL&8 z9~&T*3J(tlG@Ssp$@>w+W?Te7ud>OwNst2Mx!GDz|NVCC#EFs}JPInY63E(Mq$gDR zSPqyw2@D?m1V%s|zIugN(gpe^#9?|6Nyo<0k>HM&*yO-ZYO;ls21ePdz=dlIY~Q|} zKq>$`1bwj$2>_Vol@&0tfhECvNtfpamz;3+5WF5ZiDF}=5$!=a^ZV~#2py=`@6b7V z?HFQmpbZUHNAOBwk#@Ghi@_KVpcISs<{@gC{_O_s75;&|22f@h0L4kBeJ?+u@rdmR zlf)Jhg5r1Y-(wXjU$_7)=5>uZg)0Qk(i&1gprsrfeggh!v)=G_9wFmix_5d1e%=m?gIe2+~HS^ zfn~bF@>2npyK#$zhYYaYii$KFf$x7n+md*>uB8Pnn3oVq;bS0|2V;ob%l?RXosh;vEvuM2JT5oi=}Vt3y;qO)AzSa22Fy(k`Ogi$ozYe5ClZmYM@*KTre|^7@Hb z4(kd(-4SO~k)F`JCuBI9;{gTvo?r)qhXb)HI?6ubK|xzbYl34N{1f#$JvR!WXD3L+ z7_wz(_yQx-#4-h#HV*^!PfI(44}gRFRW3>erY^qmi|Xa));-YaJLTniTb~_77h9mS zvDvY3a3_MeB@iiqi9(s$PIG-$z@7m2f#JE02a1%?d~(o#ZTSfrW!C-s2LTHOecgVt zH6$bir!+x?e1>VWk|g}daj4(0R)`OUEvJd%0m?O~#;9Wk6$H(Y;GxWu=$cGV!q5=- z7r_s9!E=QL0@y)9{|&poz+G>~$4{S9k>KHh$W^!4e7DR+P0d&M9eCOV zc?|=ZGcsthN~$5HTEip^3RzGwaczJE4$A^SviYzl^f{P z2T((yS^Cn_0)?@zUD?3Ukj&x3K&NzL#pBjNJdv4^@p~ddJr+$r7tCurLG&*bR3G#7l+ucqtw`;yuD#BHa!yYiUIV;AfxO+N^Nh;d$>B zF_guc$FvX}r%P~RG0H(KkLL-M0`dO?o&&JoN-Qv(rLf!J$iRSu>xH*f0T4!RjN^A^ zauU<;igCe0O9@sm>~T=}V!WvtC}}uWw$stEow%cqPbbFO2Kom~^F~I*Y(7-}G#s0# z*3MHG7G-ZKoqrM=8xq6I!ND;rEHsdpYev15S_THh^a;4f0NWI#+S!9BrtU>cT||`J(#T%A@foI6poaVk%O-0xo#5UZm2N-uK#! zA7eH5?`DneY(9R?x?4iz#;a#~k2NF1_Uio`8L5qDTOIcn(kEkJf10!c6af=iYAUZz4!U4vV}Bn?&X>pKny z$4%d*%rZ^aJM1JQpDziNFlylo?65F8NkX{$9*F?q`nS4m${sLMNjZAN(`F>q&sOe? 
zGp634o(gXO{LPbJzv3S3T<{F4k$jLJG`-*T`m@qSp+9cMg=+3uP)u!ZF8z+1hgtUH zlWJy-yL;l)&buqKW|>%qr_4qNiWoTh_^|98mPbU{VUkDz3bVsXX zv%Ol8hnmMth%qb{8&eA0f6VSCjC~Q!<_q;_tgc>`qiHIF9vt+h*XF(ET`3nzy{;@@ z%T*BDVssWPIb5rN0bH+ier0nWR9?%$a&ckd_P#z8Rifp@xmMP3Z6h`Ys0wxsAkbt7 zbwJF6A{Zil;PDX{i<@<&Ot|PB|4bsWgkF$8qdhT!$Q67yAevZ08*x`YdGZ9YFuZ{& zX=xicw72@A7S&Q;Zw!S!-%8{1I!q|3D}AX=~QCMaJAv)<5!Ed zvA2im1Jyu2JxUF}ZP|4vnEQZ^T*7fzuV3j7 z*SB);JXmCqU_kL4sCps@tZqXIqmljB2ezf(pk@lUvsnV#KgT-(k6T zIQx(?UNnj!?!O=$n_Pzu1v37C11!nc5_1N*CJ?g_d@Wp>=eRo&fu5&nZf$*n2|5_q zYN9RB3)dPb`uBk^2Tqg_c_w#aM%F1@Ow-c*t&Olu;u2D((M5t&*e(vN1-#fbo>0?nk|ar}B_b1ExB6?6a-} z4{?6~SBB#f{4H_nTwY$`JPz1Zuu2qK?2ArGKbtWb1rYYxNFjX2#9T; zaN;npttInF6aW*tuxV7gn^0`eN-`|N6 z7dYaOh7thx7WJ}wcgg|dj`>V(ZfIp~ z!A&sgU0qM^_|^-hFX)rFHN6G~Pee26=k+5!>n_2#S$9AKV~QIz)brhc>&Ze;DUYv? zM?uIcH+}s4F!Hjkww4QzNl3^F>{Q(3xYdMUFjC}2gx%WGLLi#(3DZ=f3t)6V<4uK+ zF^8)d$FW|SU20B_koX{gL%=%XIv7AV1XMy?t6NAC&{kvT6zSz4ox$ASVi*VrN+J?E zq+r2XnZ&ArNbc$5$3LK@$Eb6nhPt3VE-fh_pptEe!yPPqeqps{WfGPk7A|n<|02p+}LkkEMW>1?$0$Mmqh9N zI4G<=?Qk>k^YMWa!cKyt4^{v`j9}Omc%+DtV}4fVBTSHA5)EJj99Q` zJ!Hc;_qliuy)9Lm&bbwYhKLO485o9Py@EZ;c1(!|fbq58;}2=M0BW=T@>=M>O;S`) z06Qp`Pmx9cfP&w^Om;CxVS-ty);}bzIKS_+E1;raZ#Euey>`Mhiwa6 z3o)38`=6iKmQjp1#8`>p0YX&<-zO%3`~QZV?jP95aVH(ol>3JdjCHJ%#V@6Oq;a;P z799IWQ)|fATB&j{JpuU)%+K-fnBiTad(I$j<3yBzzinUn{xI7lWp+2jx zoR={xQTiNH#fVmD$Pmz?K1NJ2hmoKH!4cj$?(}7F!9d^3LzZDGhByVjg*hbl?aMGU zBCP-+6t4ln2nB(RE506bOz(DyV=={Zg`y=`U27N!Y02=McM2feDCrLf5M>qs^ukUx*m3rA0->!izn$C&^bX zv4-<9+MI|n7|QFfW5)BT*W0tMkUdkw_)m;Ilr>DE=(Tmsr$%@UC}K!O#XX#{1jRWj z3XU1mA-sn^Oo98u?MEl8Za~6Z5Ulm0kz%{Y+D*N?8;9ODAaAy@S;VWa#w1f5XX$Ba zH>|nE-ru@h!mw50w;pFoL`T*a}4Kb&MjwS>)~pO9|q8n5Bu~WnQ!HXAq5I zbmxmJr)Sq(R~JIH;9Cw~FDj()GCQ^1LGY!CuN+eS0d%wh4nx5n2f|j09wht#KGq#x zXJ-sqO#5w}$-|7;8A$~)L<|Uq)&&OQ>ll2D2SF0?*jUG}AP@$)mkDkq=Bkr0{mrPz z4--NVZh)u{eHP}c;0rQJ_#iYs1bRBs3d~A-n2#eo?&(t`u#oMR!=DFa!NBZ!h54bX ztLfb!3ojx&#EvHNP7IM?XJrj~5blqP-l2_hP-+4D#u-M>KZ`;PQiLsMbgJ=NP_0`) 
zN{GK;mj^`jz=5~emXVRfTfsp@V%)Q*1gSZuYon!e3dS8IfN&8Jw+Du2;8FJAo(B~c z@-#+~`h6F}&F@9R;1?UqR;*`*g5TKKXtZ6a1-EPds?;>j{)sn1!KHLUKKHzIwAtz z1#N*#$h`=x4klJWgip;gKjwEGZxMKkSFccH;w0a7f~2UZhz>~j`>*)oMXz7ynyF!y zH`~5_7S`4rsGYsR0HfA8DPnMA8oMJ~6a30J*YIG`{<0_PCB$GyV`D<_pp}U$4;(*O z1cL}di{C1~3cwYX{kd^UsVLK#;XhujKg|fi37x#*QMD^&HmfeitEyy_ec$=`Y&Gir zUZ%569zB&^yM*Vo0q@NTjQ6xeM2vp=S`>SI!uU|^^BVP zp>O-yi|S|#Us=$I)b*2dHN9|kSI#>Wb+FinVVnBJ9XmZMq%yW;?r0%jc+Hy=P2Q7V zBmPP;eM!W}GTjaiV>q%!b{^kRT21qw{O)sF`@8;qN1NSLw@a3N@t}HyL4vq&vn~lH z_HUmT9=X$P9N~IAQ&3MqPc|*pLj#1B)6|)56={KrCKO! zeTm)vtXD%EdFdat*^H{XQeAd8RsVFPmH)Y#j4iZOo7CZr6M_a81K&~oYNQ~l-3yj^ z`ei>*ej=(5@Jor|D8NlSKBNU53EsW!-8l33kvqLni|V5T`-BnL{J@CO7w2L+7*jV6 z-83;F40uSPy=EzR6^s4XaLBKI`$R!<2)G~cvprUFxkWIqLzLW8DfKgc0p^pOeLE;9 zo;aqOss*=e^Mp`DY(CeN?E_q_BLN7d#WAvGD*hhET%r4mJAEF}_y_DW3`jsxt#INOB2}u(RGS8SBU7)jAei6^$Y8LF_9^ zmtGK^qP|RfcbTypQ5Xz+we?&}MmRkqBa4mdspw?X@9m0cJzMoNbaADo+YmGFoO*|TT#8!2H1Ac)V|G~fGUWRA?3g_tL5sb=?L zP!DES|2etU91C9c;$h}-TKiH4%n<{X@z^M^8y`O0N9i2scpvM;W2Y4cF}VvwW1;m= zG@CH|S#NF0M!A`j!>36AV?4J|T#J)-v8(f;MVJv56eL&i3c*YsywjlU=VWByirj|O zsNiNZg2gdZSP@Dn?NbS#TEibB3hX#p5w=ZApw%o09vOi@4cf>Oh@8b7F2*zxNxGdF zn+w>+C-j`$+@w$alq=O{E-&V>k#xs2ffyN*yu3UTyk(rb@yD(7OifL}&S|p6sfNi7 z^B78jVse0`*ad(3Z>VQArEF~cwMI)01v4xvynIy@&T}rK4`GY~ByJ{8^I<8{#}exV zL^|jcNroZbMR}g?l*soB9?NNzbv7raL!$hiB;6mBP{8yuvKk=1Xt>~;XT=mmuA9h# zS!__7Jq~LqH&+TOK?o-^v$8(FGWM5CeAnE})w?j~{NfKXfFiwl=n0^XpyeWF`XI8T z<$^2(GeSi!oDqn`;pXG(iyqRJI1?P~`4%-V#4tMV^iX>B9PUbe+@D+xkewkOn=bkb zJ0db?WW^Ap1u_1Brp)6|Qxs6M#2u$Du%?Nc+&^zi6v!qTV_69~pJk|E!X$&UL#$sCO_qxZ7b<1ujJd@xY z;>6Vil?2iV`~w0G>Cir^6h)(h}4gzlh#aTxih1YQtluIKNc@sdHL2`qv9m&`8 zm>5@ZE-?O=VOd&O1w(Z)51Io}Y;E&vaMRn)l=h8{Jh*{q9;MP}_i;&RWlp$Mqs+|K zgxRtBZ|?6W0if;n=cgHI@yCyyd*)6-PF24foY+Uhj52T#^H%zh9u z;IUTiN>BXz;tz?KtW;Sko!C2DHH%qT4`u!w1V$P)%V#C1S|A~K2y*P)@Ecqt@{!cy zzf=2(1PkB=Y!WikS$&$fJC2u}ASg@z9aRkty_9*cRFUr$>kVSzVHy-x9~mmkGE^B! 
z0!Md+?$@5HTDp&W7Ec_LZ3zGgCI*a*Xdu_;V8$jx-iD}|&EUD|CCXj6t z)#Jj#`dmuNh1HC*XNYQ$w9Jf-dfoYj&51t)lK=oqdHXkooEyea_`wB@A`lPFJq=JPD>+bQcD3wCHJTr)Xs&qW3;Ts=>SiqEZ86`k(1!7{$TjYiw*x zYK+(Z-inR=bz(yP{Q30s^zLLHGFiR|sA>Stjl#iy5B+aUgz#JVS;PeXG-(m>A_0KG z#wBKCq3jwdtsQ~VeL31tJUsPHO;E?E#GMpBapFo||0dQprsfD29AZ9 z8AK{&|1183frZM^@88ZpW5GqxM;{JcG0(k32n<4*+}!#Fk6)XchRYeB&m1C(!JTQ2 zC$HJqYH4vty+^AN$QT+fqJjJBRWl9}=l~WM zH8QmkdH(cq%DMbJHr7&v#nRF;1~eN4Zbdg9z78LEL7 zk?Mmi+E!oMJm}TXg!UZQ!1TMcVnHgT1dJFV3+VNnid$ z+QH`FrM{I%Y}+;6=?LxEK=J`zccVj$by_std-&8A=$Sd6gw44`kjS*fS@!Qgg(@t{ z%+gL1o6>!+@mCY+IP{`?G^_MJny+Z=FsK@(4zw)4v zkgtD`B?XB!l#iR6`}lF@tn`8mXYBYsT$<>(Y5ME#qky~?D(diDVqhf};?`~3$VgS^ zUo=W;ICQDC`%|A$>>C&$6pSEkuxEKIEJ=HyiH8lP)$-~*X zR4cRf{d=H0M$ovzw%tZT?+}+9SdeGV+Qvw?E=QiAWby^Z-RMte2`M$Zp@gi752oLB%6cZ+?#j<5>-&R|Mr} zr`RbO8A7_Clv)6WKgdOx`5g&(Jf_h=aS2VA1_klu85mGrC8mY2mh^&aR6qO$zCAN-;7#9TB|G)me&he6Moxj4Yr<#P&)8U+s!&z%9 zt6O&*NSqg}j7)BEn%uf`%g)H*)=kbEE}ZApE^(eWwYGP#H?}jgao{|7^OlpD@vT!f ccdYD;?jG9O+WOiTKZkVToSJ;DoZ*B22N{CH;s5{u literal 0 HcmV?d00001 diff --git a/doc/pics/tc/tbf-qsch.png b/doc/pics/tc/tbf-qsch.png new file mode 100644 index 0000000000000000000000000000000000000000..27c48a63d397502e765ba9f82e2f3f6fe1872b77 GIT binary patch literal 49533 zcmXV22RN7Q+x}S%6d|)zvO+?L$R-+H>GOHPayabE-1)qyM3EUbaA6=U}+dScgfL#VMLB}4|Bi2p}p&g7<%3> zM|55gztfwSbGYqLwClkx|J@Y#xi18_aFn!6Z*93fonEGqBR)Vgu{bwpw0y(zhNWx8 zO4}!iKLUI$!~t@t8u%%?eFsxCxMS4!ybhrw4_G**O?!fleB9&}rzSUDbA`sG7x<)4 z>J#Nu4eo6@kx2ZbtpIB_{NqiE9tH9MxiyB@Asqe|`bKkUnwc>w9O8O?f7jV7?;O3P zeIh-p$2v@j#KKEU;7qnomIn7bgWtQy)EzZ4wPSl#VxnqdHuvZ8wFo>k$S!yJ6{pG> z-7>zmwq}qW*FO35$&&@QkDHsGncB^x^NfL=U0t-Hb#ZZVL_|b-q%{J6n(3vl-?*Wq zs2Fhne*Xs-o0`CV6B849W@PV(>0fAOzP(!ZBr!2NBZG^24?%_}_Bk;#BV+fg%QanH zUESSuM{EL383Wf=nWO42^TgT>esur)v*(D8J$+Ep5@v+=e#%FwzvLiX=x=(x>pORs;Px5 zrFB{t*4Nj2cz76Oi%LisX6u}$JWjoba^F6p6!*o6`T6-%S3gpd2jB|`ST{#UN5;S( zi)*5yqO-HJBG%p47?rYmMy?1@E-p=Xaj7RUGBNE9kRv>5x-OLP@NSqSYkNn>qxQ+7 zn_Uy1(=Wb%|9up3&8Hh5n?0o}S)|moH}rN*`P3 
z<8KDb?v5TmNJF!*Fy5k*ZD3sH@wcoG>l70c6U(bRJ5(VpC3V%r#HaO%$ctKX8;U^F zPpz#r)zuU2$)_H-5T%$mguDFgZfa=>4GY`1Z(r-DPc!``i=Prse}8vVe(+Jm?o&H| zU=h7l?D6-tLG~Scd*28F_u!*u-|4#^^2A#ITmO5;Zjh94`gDOG$?gYrX2QXYflaNg zWiG#T+}u{XGqt|H(O=wLTTJ%ZO0ej2`Ss=Zx5ChdQS6ahy}3VT`U(;f61KLsURY^* zlnhF}J&OHvDe*1BgDR0SvL4ox3!Cb!MBTaq_Fv&UIn;W^lnf8f9Yw#Yq%^}oLw^51A? z*2etXSh4HPJ(d(Ksu($}tCAjN(Rcg%dm`VEkPD8ELMQU7EBEhj$(Ik>UsvzGI(Mw( z)6C4weFc_j&BMGw>scoW zp(6Lsh=!VGn!vRZdG%Ksr<%vR3#wPbKGw5yEVBtFw2dnNlo@oIO(yp9 zZ5TZftHK)bwZzGi(D>;SCH73HnLfP<>nx+R4*KKjpGJ>Jks#M!M{g*4bM^Xna?iT_ zNFMu~%Spk&FX_JULV@krAAuI8nbE7qTbO>(kOvU^$sDD|rG2b8Y49#y+=;V<{M@;7 z%LB*F^k`Ex7-?vPe6~Eb_i4Hxij`NaY-#uwRW7D$- z&&08}cyC!*^`@Pt`slD~ANBo#&&WP9-^xmvZm-$#M|Xx7*H%mJuFSfPU>`qlSG!Mk z?H5tynIX@vmzP)w8a%O5Qc_;)OVb0TMXH<|z9w2K0hh@y)lSyb)r~(k@%EF82-wfp z!py{!ZBjAV+sjDKYUU^NC@E=cxauA@Ow+idXIpW;-l~YaAwkP=oRIwbYmSMu=c>TD zbH)uHN9Nsdubk22iz0hsx-iL9uPcPKG10QUNciwT|MVYR@~#q3|Eg6JlDn_ zqs8hM+aKHf`sSslW{euGejH97Cnx93Mqs;OUP1zs)I5Hyh=>asy{xQk%i~i=M66>% zLtAuBykcWwYDecg-v01EC4+|kCEF0?{`+&q&;SaC+1|Y4$B&Z`aFqk4u3W0|xwrcA zo1(c&KYR#3EFmf)axg05vUVDIdFYv6+j;Z@I1>8;f?yP@*i-`=eaZEpp+qmR1QKV-r+o-};V zUUrf#m`m-M)lm7rXW~wHHo6&ViKxtFUK{lLloCIa5m)tO=~}iv=_z&?MZwHAE~USC zUiPE&^yk(2QQbFsf;T=fT3Sx(7u*=1m>Bt0-Xr)0j*=Psve{G+HSe_iGS2lCJ)eSHbSETNym+9#zwmR>1_(sy=t5(JzkKBc6j z$llv!>gv}pihXy|sCe$XU4`)n=bJJzGHFU-Xo2$!2g&P38zOKOKW1mWCq5-SdPI$8 z#=!6x-&oe~!dtCFZFQ+9m26*SW}aaB$33w|Nex?^@Qd^>iOB>b1h8wB7%b}4F@yS zkG$_VdhX8MWN9DU8#lh>TeLiUNV&P3k?c)OU}1UM(V-D%vADRXA2(3pBTZmrX6EGJ zFwK8@#_mDg2rvTE`Co!X(Ol|i{(%7jrceHqZ|o;9i{6S)Ol-|ZEts17Q8WLuKH|vG z%S%slDz-eny}Jo8_af@gljW*BC!xm+!&M}juT;|IEDnU$Vbw*+a2Ku4-`oJho~T3)~IJU^nyw?D`N1^b2O30A;4BHy*e$pSv!$1VDCz$!KaA6#%R zY9HeGT5P|)%fxCsI5@a{>CH1IO4+GBH}&=PO?>`|5Q;y1z$Kq^n>%{;?30tiZzJ@} z+~$eN%qo3}^n4sG+mogLOicwB0be7#u^XY zs>GH*ub?35y~(IGF*YWA`x_lsj(%L3>mOEw{0#Nv8~S&K%DsRQ>S!%L#S7k<(K9hI zAuvk132Y28oqRKhhY5HZ78XWJLsPsGvk`1oA|FUub;}_<;CjG``8aHsyCfKa}*ZsSOPW8>e-(CCSVh$CNbegNu@7U#7+ar}HZsqc8pW2YgyW-i4O 
z!g&I9Hf$}Col0TML^9W}v$y1{q||U2BTQ9GfAQklM3!#OE`pPjQ%n>a zS7FJMMgi4^@WZ?JF?Zt=sQWfHpb0oDz8PsL*E~EsB%G%z9S3)g7g1%=kar9HtM8~1 zM8lPSBx8+bcUR`pKY0A*(b+5F0EEETGBG)c!sv!GajPmzSzDEz#<<)wK^qJ9t+tkw zU>_H4(MLu>Pw*`&If5*J)zirk+WrOo>9uvt>8xMoXUJZ)9D)o88o8#JGJ%oBU1{Y^e4w0-LGC9Y>wrf zZ;0499hjc7fYN1dPQQ3uSC6*jrn7M8_wRRmb4>x_e}2h+ii_B?GPQ|*nw1u^k41C_ z2=(S!X=!OfynrdOp)#${=KOhr6%g-+ln^iIaPZ>q`-Xc{Qf55zHr?!PA5x5P*D8t@(gGP)giIriHVn*nfC9)8%FWZPV(}$ z)Yng73ruO8q~7!C(QGZj#!m(NB3K|ZfR?GzcnreHDNwuYjj-}JcEjyyt%o#`b17`Z7m)l zFCBT5?=v$S8wx@^V5$R?&QndDm^99)Gm~I#glp#JB!|MT>3?Zy$+8{z0En@To7JG5 zHzXmsS7`GU?RfrA#r7R1C#|d=xztA=+b7$Tcl)AG**6R%CMMp#ef#j?!?7yq%IqT^ z+cJK3t#^mUswoejeTQ~<{ljKQs=RsqL$v81(_I;-j6YmAGxLvr@kf!JrmnX3eqf-e z-C%!rcg*U)e7;gJjM;#cC*aBN?1l!h8<%JLRMMe5m@#*U4i8_GO0KZ(;SqY?AfS40 z*S?QsWq441#5p$?4!T~r5ZO~hir(MW*0z8D{?)X)(RrNju5801hz8griFXYo`x7`E z=TKA$bExB>&#BD?d@ZB%z-afRgpG35!+yQYoiFhZ?@?A(_9YHtd_S{U!y;lWY+6kk zs}kU(b@}pTl(IfN6-WaPo2&C`iDwV8D%UxvFE!|7^%Pq77~npk4Hy@LJfF0`EbGgwxN{Pa z{r&q{C41l9?81U^T#R!D9-4cTqSp0aKZ%Zt8oWa4@cFr?zqSJ99-+gOWK`$!jO+*H zClAx_AqA(;&CQKRsQ?+CD`KdS<7jPbQ(}=d_Ek!gHR;R$9dhk<$yE`M@y}ns?)Js% zbB8GnOTDCk*-tBUMM@h5p>7v6TP#hbOkVt}fmC-#TFq=&852 z*LAJPtn0_;vR(U_hr5qBIF8BDcWX(>Hp|fVJ*Q{1-(H%VBl!0A>Mhxjgs0v=C6CHn zGQGI4050_2td4xAKFYbdiS6ek_Jvrc%uev-@0uzN;xCp<6~IOW(QU4}LkM5lCDjT- zch=;fKDT&qVij7e*}jm^VBVcp_I-R!D%mgwv}&z}#F z%e&ddDH+U36VV{yu{4Ep?OQNN5;O@`!$%+b7&?h%JF9SqOiQdr=k2Ad&cex8hB&`IWNOXY=*OC5~i=X*#9%>h4Yyb1-X9I;SzmqW|gKh^UBC zVk4?!Ypd7S=TAnm3_guNdfGO*7^}h|^$PNr*wI(fPJ$;>7JJYDh$_E+{aRU>;^ul8 z{q6D{Hg~5vUFPlc=c-r8@A~gFt>C8|95(|w}Ej{bN-b0sx3HPDXKYo>cQSF-uN3qScVr&mrLZINaS ztnwA))qPu=gKemgUp=%rH8lk+o2SP|M-C+^IyyS_nu2Dg_LC=1po9=rqQ81?ty$g@ z_LH%n8?O5M>#Mo#i*I5lx}k+Gjnum2LmV>9MA;$OvDiWmDNZhdoVV^s1YB({`!B^gc-6XuCTE zht}5Ckhp0{uCpXLFEngVh&lVJ%Q1Z$jmaOoSLPO6+8?-&MS%CrnWA^^gqJJAb*(l& z0?m$^q`LB+Wx4a~3u*;FCuiVE`&GF*NWnEJoO~^VA6zKN%m8475-6#_K`=N#a*>;#NeNIkAD=Xgi;Akw zpTXw2Oh-ro)=!jhz2o9i`0kzO($s-3$Ds8M+{;i+kb50gE~2BOGsOn6PA;{w(1zIW 
z8jcj&092)?$;l>Y3n5&?DD8PE(9mid8q}W%?;W6Q@;X*z2e=;p88V@9Ly7EJWiwEk zg@s(Qv#ztB$~Mt;iF`{-@2aWof7yQ$C3q-GN&p76pjf*#dL`vN z7s2`07l1TgFnS)IpE%X4xsjuMT+38j1QNpfqjFv~n#>TD^w;_YFVoX4ZrtF!_>Wfl zij)Rz$mdH*k|n?yj85HseeNJ9fI_?u4>*s}=*8`@WJnN8OG_VI|42a?# zN$yYuW{dv4TjAh10S1Nahd7;{bPws#R zoU?|83^YY7bEZ~Ccv_ly>7C!@+glrRbM}sxuMbp1x2-c1+?x4{E5+W%>0Vx0F#!;0 zeR@W4mA@q)kO7$Vcr?#GW?{Fr1&;TXkPOzBfB%Bqg6ny8IW7I-<8pMLos&@?hC}A$ z;_`QKabvdhkH%FrQGD&ik^H%}OC$FWvJ(vR7ed< z>FR#PR^NiOx%xe=z+f6}15ampZf*|J>->BIkSfIL9a-`AH(_`br%s)sqKZKK-L-qS z`2oZCx2isr6cXe_KA8Z8(9C(IC`PcVu3DYmX zN{&|tP(b{~rdYz&;Mc{)r#N;{ zX$G$tO7aQ|(-FM?{ncF_a8%E9mJ+yjz%aMNI8{TLoo0j%Kpi#%2hHAf_Gm%A&x;Qp zKCEr>LqRPrDTy3K^WltUVrDiwqrm?dDB0Y66wT?OE+qkq6+X!pnY)7yM@B~-_J&17 zL=+Z^G?CX-jRcn)WVf}noE8;rYi;f6>XMJvO4WGe>3h24Wsf!1Pm%3Fpm)`%m>4(p zp5m(G9lL|S9~_@|+cA~a7Sam~U9t@d83WP9-{#LeWhgwIA1n92gwDbJyyj6 z)Gc_3X65Cp9_tb#+iTZkp6d?(J}oHt<7I90#dJ-nHAxtc29ZddV7 z)MslG+v#b1{39FoB9T+4)Pd6_g+G#si;KfoYHSQX(X*xBT8)_waUz--( zX5~M%KKS>?o<{fMT;xfitd$9pKX>VeT+SL&TR&t849bd{8+sh1Ai%qMR`V+tB7pcd?9wlu*{l(6bet6nib|y4rPnhODXLE79(hhYxNv(sz{bYidNE za%!?0+N~Q=oEDvYZg}(l)H|6bJFOoVJFE+VDkvx^YX$1-+S8}B^Y1^_seZ}Z-_}bU zaYUL`+;I$b>Zyp065GIK*-hcpD(@3l4w~H@@oj>jte>TGl2U;*S*eKb*PXS6aY!^L z_4Cm=g2+cQsCpDs4hA-|ZS5Sb!drb&l#5sH{NBnO1W+g|E6aZK=G9T1y|8&-i=K^k zOMWs!_m(c+W#34>L)wRu3Y0MXF8aS_hi70+zsxxgzL|Wx!4;Jc^OWX`e7W^cj zzxpcrpx3JhLDpuSn=AozzCXN&H}|HnWW2w2Fx!?&nTyGtdpo4Dl|aE)Bfytixp|k| z#d71&(KBbo#obYEaKak)3Gz_Y>oSlBK&d|_7$Z`;2`->^aTa=Pd&xxT7mQ_z+XbL;ndcxS39+AYw$6f^=jNq} z#Wiw8sR*v468TXEnC~!gwvLqnwso1O^=Sr+q>!)8l0pFX-hhH7oEE;Q373&i<5I=2b*+ulI)_K zTUx69=LnOmz`S9-PGH0#VuD(jt6Ygb7*lImPvavi+ zax*$7=TEYP>-*aS9(^Nq6`OPNPrqb4SgM)L-z?-K05X+dExW;TCMnCX=*KhhUrG^4 zoo|g-6hq&Cc{>0_CW!sJ&sCB*@xz`UY_{JTue<1bITym%%W3er{k3f1m7P-9Gtq@_ z<2D*rBzh`^q<0J1A{*P<-lA6!1Ymb(YM*sq7(>I2U=$>dHoew>*82tZKs{LszYdmr zZQ%A{OBxhfr)y@iv9ayif5>fXa}5ys7Npm{e2f2NY5b9tl=PAB&zOi-7~=of$!hjir74f ze=1R0YN%)U^z7L!_n+ONbM~w4hxSkqEZb*$tX_Qi^8U`9Bzk(CFJJbHit_K@-*(Dm 
z@S~43so0%l`gfK01Q#2Mg`M->n%=gyHmJ!6ObAIxEKMxQc3+%rHSRY`xt4oDOC?yu zz;s{T;kNU<@!Izu&i?W=yd4$$;}#=50|P_Q4PboIJ{e*1!HsA@YBGc7EsT(}c3 zFK~v?;w-?~p=O7?xNv9vYLV>#N+*oN$oTkRuQ~F&Jg>R6X+uZ9XKMX!ZI$J>gcx7= zq_ur_A@jmQIg|8Y`&S0?OG!(Aw$rn-bBy?}WYSp|LIZl;)BNn&n~B!lqBlR27fX~8 zQN5d+{2=W#G-2J%9A=OfbE1d2Q10z;-bbxpY60gc@A=@@%f7U&S6#|T*NNjS{D_2HUTO16cy%@vy+1{O$O`NWt{LZ8m8aAJ$nR% zgw{9yR16FZK*IwxY7$U|a%~^<4BrU8J)D1Gn=h(tkw)d7Oe=!`p-(ipapb3AEy1h) zUTE_;Cg%Q)CjkKgaH$I{k90kR^@i%<{r6`q@FQx|V?Oc-vr@hR`DA%KW)^^~-XSd=KCT1qt)P|P9!7Hf(oz^|q{itgoX1&j~ z7d}(eeW~3yy)4}zrM^O$L6x1(Si>Gv$O-#?7D}(*v`m_5c zLIKQSfnwKV&J>?gF*53>lHNoD!yk{wq9x8{w!g2v-3}!O8Vum%2UM3IH9@e>pfH~S zFgwVrgXcFBbi{iK_A`n+$SQj6h5Q}+slJ}ysMsC|0Xok5bVl+H9r^vc3_J%r1+{d? zbOc-ng^76Z;0YEHpyS`>W*ncskmu?gWG>sje8sFDD7TwP6*2G|S@pc`0ky#Iif^2^eAz_9(aufIc|-7wA-}8Yzm)Rl~QRh~9nzR;j5Om60)t-PMjIMqtO+*BAY; z<$C_JXU|N$*Ek@DCOYnXp9-ICNR3$SEbZ4Pv|eKO=kjfIsY_c}SfpoUU^^z7GG=Oj z123!C-g??FYW&d+T{Y4}=p)02_c?ole=OEi2OO8UsJp_iLMiLV;wr}*(eMa_iT;K~$N|t9xJXrlMIm%#JADwB=W3f0)EtGr7o4 zbB$3z=!5fb15Y_(I)etsmD$*b583@HmAQpGtVr1+6|xSZ-ket&^!c~WapqiG9_2If zj>;-uu92Ih%#o?eHDC8ol0J<8!7|smbZt0UBa_2V)mht9-Za(3bG{Cl86w|X&%#V1 z#gF8t<+TOzB+iUZ>M8HPewd<0;ClVTPYJ`H>72C{?0o*qCwbRv3GDtv-et~NEWvxQ zHcCb$&&ob2PwO$i?(D#Z^|yBpr*9l8ck8Th`IWWu$91N&=F4EY{I#R@6RMAF2Rt%f z7?t>JH=Hy)d)*?@u%g^TyE}ZepIII-kWP_Y2g#bO@=-8 zu`{n{+K>4RK65=ieN|J{nbO{S`{EwDZ#3`CDxU}KV}T*JJ7D=hcS4BA*C3uJZyKeg zQ~ojrV#8i9+{>@=Lv0#Kl4oO87Ce+Sk2rYaNxTCx_K*^TT`p9mpE{!7-cI_>m7jT2 z9J!}#rL5M*PuKOod~4=mW2?%b|wa<##jR%87iA2}teI^c1Z=Y+dmTq}QaoaJX zfr-1WOJDy<@Zqyk$Hdmw9REiB1O73AjpK6vzD_-JKx+>6>n#bq--Ne0*S0($9?+W)D1V8@1}r>GkS- zW(qUg3d<$Ael)XjDOqUD*Z`QJl>eC~%^a5o|Kp)jSM_vFvR%7qg1(r2wkiC!lbyix z#!oVmxSXvc?Y$``EnNXOhogy0T6!Dkn6l=bb6mz0?B;n25Jc(;4;QQyde%d3ASY`==*& z6mP{3obf$~_L$q+g2p=q$oWM+OR2m-P(S~p<~is$pdCUCC~g5|am}{X&3@Kw>6Df8 ze&CDHSeoCqWDpnauFU5PUpZhYf9US9p(rYq^v|`mFW?{{xTi5px!wLP!+$uWe~GV! 
zJfM1UvfbR_1#PxwCZeICp%;~ucG2B8x=*}z{NPa%3F|@_#vpd*(>1T$X}KZ+4Kh-o1T3oyFiBH`&EUAl^x$%P9SaC`$YGpC_DJ$(2O8b3O{ z7?9hJq26#DT;+a1z(CpEGaMWiARBljmTk}S^79`=L`b8ALqM}P?!f7XC}xO`4eSGs zVN+-jq9Ek4WF^c3aqJK3Jk=4Yk8nB>zhPq=!J0q^0}5VVS|aturNh)0laN5e?fw2; zcjpe0Gu>5B$Txb911v(JA`_lDzB^k-;^yZUXeF!!m3>AL}R(Otv#%EOA(buPMVuE(CMX>+% zB^w@tnaoc3uL!#ZdjqYCh=50i2uHf6J~kns5h6FDXV30)iN1R*=8eBBy+#jwa0 z)f|ff393XhlWq?wT!$S$rR!xYU>DYEYju>0aP)iJP6&yge-@q;wmRZg>6%O|EI?NX zWH>j+oCpaCLGOnR8l}u${Deb7VgtlJ`$`slpoum&owv95VTn7>&?$AYE?v5Wutx_X z3{c*}_0#{$?;_L*z^Z~o6)v@$yiiaME4ga5@}1mE__U%!5B|NUwLk*T(J3~D^w2(!HSXUH?!OCESUPUNf1 zPQHIXC^+CeaxV>s4H{LX5)XP4jz%hjL%vi?{D#f{;sra9WK?|nWN4k)#fvRSClLDr z;sRBVjEn$8T&?g9J9u9;ABRe65Ls~u--2`(QWdSH^&2tM6CMqfli9&>?dJy3wz*>N~Mi9Wsf+#ySHdb6( zTEHiP?~C{e0+Q^w#Y@YNnh+p6A%>a?C$Q~iRl3P1FgfAPl$>dnVJ%_ z?&gkEerM4_&%(k2E5J_%88OvJWn|;(^Z5cNZo`!YM1@s_oIu6u2nA*7@1H+^j*a09 zCjR`{k)UH$ygWT;e*ea|g|P{qWo`Wv89a6xxCaZ1i%!nY2>q=rEC9ROAh3<}8GeTY z)DXsEi8LD`Ua(CY8!?W6s82`wMFxb@eX51L;?6|Ab$u(PZA-aFWu9(-;495cjliAY zo9CM25_hd#)036zD%zpakwxQ+3!x$PdQrEek4T;WUc4^*%x=i#qontmavS440s+_< z3woJ5Ui9B5AKMEXFwA$N0Q0X@7?(7->0lm1k_LYmIiWy(p;q3ASlyqU*jFXJdZI4) z!BkV@IpsDo!ozpU&-q7w(j~qM)q9T3U&1V8yx2D=Wqn@T-MRXTeP;aZ>G_WickYM^ z3KF#$yld}U8dg#qW38Ru|4|=uDy-KpU%xhVDw*{=?DNc*rV{2M$+k%@o*h{AmqT1sjjZZ zbjB8{)zha>)6TQ@)Y4RkbhV9G(l{q77>vFA5&qpw$W~Zs_7HK~uxVM~`MPiVx!%nldzAooy0>+DQ zA*4coISKBVgJZwtE-5fL@MR%eC6hK9L-TG1MGk#0CNi>WauFWZ`7mY|tRU3>**0ku z6?~+ordk(1X&8lW^#{HSd4PYzsD-6v1k>pWAO-~DJf%!{Vy|N7@bNuliH+Nle6vWz z%=9X}y=G;psHkwwR$NLV;^b#;f&Iyb^8 z?0l{W;CI}Gg!>q-L{i~tuuGs1@@t_AZ24QM&mMfSE`*3zJWlwQLGY!w5&zqlB?v?1 zxYEO^o~*BcDDrZ0M#jcWfq)btSon~Sw{2`#n3y1`H)3ml@9iC%oK$}%mWQMTN{X7g zx~siC^&V1ek6pWVNpEjD85uo;{{?LUKpaIVBu6YAYPZhrTubdDTjN9V=Q-GJn3s^# zmcK&s<~SKF1=Hk2a{$C>laX=y_g%&e_HfY0w( z3-Gyc&s_h|Ev&m=A>!2o-V0XE!pKM{JHg>=ZRSEa;(Yh+-6bUjjf@nz^<1M%h@#}a zf4>Weo6+&!E-KbT%*q-i0Oa1poy@!<6IakR|qLi&g;X=Yy67lki;=FH<#f2~aIksx4#{!&+}npR*| zWM)*O6B70kK>aagpn?TQ(}kILpQ!BS&{|o?YDf>!NcJ*B1&9Fz-=|N%LtGjvJp24s 
z=ncmD#zrf6kSK=)0khg*XfisLXMb4-DzIT<3pmUPAkozo{P7FA#0$-7JUa9i0W;B> zoj!X(MFpXc+t4Aivu`^)FG1yjkMdF{%iP=?rRGJa6;j({)6;#>ozalNw94HVZ=mNw z4@|xt->WG*%B8crBk|Hv>O&HD-aw!xbO1?WW6+w*$7XTTG!#KXLe7e$5`DDk<@CFa zO~I6Zjkhm2rU~|%{b-aQqdP2p`)9JA<5Yx_i|-Poo^uWr#|{l=F%D78aHjbbL(9s_ z;_bLF`MEM|x2!+o$o;+KY!?EhdU5u->7p!n`n3qYS*eVY2h3G9WchZ^!QSg46zlB3 zRt!7}@yZ9_F%t;TX+QXf8R$7pE{{8#c6czR0gh5shGq%vi#$68&IdKWXW$tnEuuv_p z$|Og~!UDs06!-u9A*PEa0)*rjZg}~%P&Y%yx{Q$z)(_5paj8z6YYAi5IG*B{FJEw` zox=k(T~eRud3DSyWYs>;ghj>w?$745=IZ2YZ8@!l#LkknieIY@kM%ZtF7HeCWa|)* zQ$H{p$4r`-L!Ne=mxDnlk?ldI`n=*}Pn%)a+vNtTW+QivqSiIIk5K-W(2}~WtbXXR zb;_vV?^vy7P{w4o)d^m@2MB4N{m*F4>iLN&m(}(4xF#WQBij1O#fZZa?;XcgApN2R zq0)Xzlwc^LR=#-Y?$%O?2Df%rkCKwo*56I0t9=h?q8fc8j!42V9-Vh*F09XapIj0k z-juvF zTQlxRMis`@b{Y29$7%%NU0%&IQ$(r~uG{Fy2(lFCp(XxKb^9@q=L#Tv-;|P8wMjj1#pXv^eOu2R^`U za#c{MMcc+D_JAFN51i7=B@jT}*XkHd8pjt<&X@lTZKJ24b|A2}JsKfRxCR$#$ZI80 zDGfQ>cSiiX;ZCYh7Sc+)G)p|+yH_vA>Co+>g|PD!ir2$AP`QMyyOq;AL5;}}mN?3X z^zH0yI5P$tCdbDSH3eYJ$;m-qNp4@6pRc-21*ptH(}cnZJ+&J?)@H`!I1=cMs-6{x^li2q5#q_dkJV*h$=m_Y{mYPe#mkr+%z7P z9itq30;-$K`Bbj>FDITzrhbuyG-i)=Awr|Dfp?ruDD5D$zfnCMOAeiLY&=AvmiB0s0 ztNYuUI?7L{-`BYGCMhMr4RI4Lt_&1$pT*WQ=#4O9qmh(_mffQ^qv2Q2{H3<9Zw)eh zH$%|>zO{x2bb<(^uxIcL0I|@j2L}f+ThWIgAbKP0b*+hjsI4~<5$#}w82I^|Yg%Jh zzJ`Gp%t%6mA?5MZIEAbfto3fS|Mo)9zGq`(1Ym8D(&5crwxQvyo}m1QQv(oMTov}% z5AzKXf2eCaS4kc)0(QX9&yPBNQ2cq21%h5%NF|TItc!_rp4h;kAVe2mNUEGam4FjH^ugt#iOG=x2iRP|8zE2hA_pu} zHKt}~)ynL-)BVw~KfV&Z2`Ug3YJZAs5qIlBc@I7(lfLXCspt%1Ts_;Rq z4A&2`lc74|!f*lMe1A-IoVtpE+zw;O|AwKD@YsiwQ3weNGD+Ow3rSZ_jUinx)|OYk z{sk2qYmJM*$i#%O+*-pCA7KFj%*b234G}A%rK59(#X4N;1X%~+qtn8| zZ-(gwPM>}%=D@bHCt%^~)JG=~(y!5nq`f_VR0lvHQn_@gwYeEmk2+Fx1vkRV%cVHE zzsrTRFC$NYJq39a(BBfnEvBY^vSlg6|7i0#LG-Y>YWvtfp#91rHZXA1!7BWa_~&fH zGnlrKW;z>I4I+~%%j>j2-nVZa#|0?06;U@y*esaJm%?H^M{^99;G2p8jFSIL%< zM|07T5bxf{lWzs&0aA35@r_BkX2R2_RLsJ!@bIRRgIRK8ju86|Bmb9MScq|QQeRB3 zVbCBY<9fhe+7Kc^2o%gD{{LNsVF$~B-y)w=>jz|!b_)F-6nFB~dr-+8E`t0r;N-)w zFn~k=RAE((dBLN=<4RwbFRPIKme7^Yz4t6W9PttQhI(;{derh5>CJ=zWma 
zyI@~A@))OU{=-lcBwo17gll$oJ|GA1vhWDWPanKM84P?J!$Wy8;JXqgyXVL zR&Ym0M6ej5gmMFuA^^S72j>f%(Xk~pN84@*-~0?`h6z`L+%bNI_l1%D!lWeDK<78{ z^fF|CGe*b%NaDc!}h7;=7p`Bfxx13ruf(w*RC1 zBBY$fR3M zG&D3LKYv==*cettR}lME!gjlpaAN1cy4>UL#Fhwq_z<>o2NM|{iDoAJL1Aw0SCS8Q z`C`J00A(t&={jb|MMZaJF)=dfOUzqn#_VSrkC}&^Wj6S)3SSL{c8$7QI-%Drs1{bX`|wzqJ#JJUU|wvUKAvU^{ta&W*yEr<9u zdw_hrOe6XC&t2|oa8sd$Aajiw3N-nhUqLVb`@l`>>IAa%-C@dzMcBg8k@=hl=5e=> z2L;j;#%y$y?K4G;Axj?XR(oT6&68Ow8TIF~iOCQ)>QxM)fM!Xu=0Gul z4-5K*fb1<@R{_eM5;EQl8N|Qlv00v8rgk{fY0l%v<<-^yK*iPY8x0B8;C6O#NvCH^ z*G6*;ObKbVM?V0liHGWI43XiZY*%H&*5E-@l4%m*I|MYorhd?EUrV*|VL=S2*6o z{Kne!AUEM$I+mowMEYmtc6B3|udKW$!EH@Bm>4h)?p=Ufl>o*M@2cYC(xvR|Y={uq zIXQOMu4zQRzR!UC@1On>d2<;J6|$aM>pkmSFlzo2b!rhr0{nuhfYXJM7sv+q%7Qm> zeUW=8xaH0AR)H08VTSPO0YcGdbb(F)Q;E1l)yZVcWbj&&EdQd0=h`k-LnX}qf=BUk z5J{-1Z2xF6DR3fEx%ssi-BH$vpaNdRFp8TQTM`A$>+yZSQ6aPN3f*TTT(`75B5~&y zL@(#E)9GXXEziEG(NR36!yyXQza@nz)3kXY`ape0ZnQbg8OjUL((sJZqnH@&s>6E1 z#s&HLP$Z;LeS!a?Ln^@(T+Gpt*$+$^H-6accrDD9%!*cUmE9-qK-z+{;qC9+`nB z2?;%~ukb?nz;GXiS|E3@TPUupEv|Xu@6HaW{y#7rOq*Ns55J_#c`XUp2d7d99S(pL zh#%>a?bUg635inVzejHHjTfEyQu_M7;89pTmsyFNPxKtbj*50=I`8)J)+H`v=IF?M z5O9D;g+X4ZE@4We|HZET)lVTqKC>=TC zzh}daNV-wq!@1%w ze0xoSEfQmwh15GGkZs&t_R^>Jb}9-Az@nZ`g;*634W3)BJojsTdkbwCGA~#Z(WYC= zY_!EhkdeKDq^6mtdT)HFgn*@!7IW0WvR@wlUuG%+JPc4sOc)9m8t ziQ4beHLn|t+0{svJ*_n)@a$5`KVozgz1IzI@8H@0bq}m5w#Zj(Cf-5^y*HL`9~M{Q zkQ1prv7ni7)6GRFVaAv%Dy#m6yDqb%2iZoR)*xOX)ZF0zRHhX;3>KGIeB)n#_Ki!r zV=WP604bn*2jwq6dhQTn3d02_4OyqX1fJLjGkMr<8m%JC`{r*o;Po8)Y6JiSZ^G%u zv@4NJrnZ>}H?{1iXPc~3n-@M`i-jNg6%uij^qLBnp!O~0W90w+;MbF5W8Hu&KyY$Z zPi%%8&D>WXFpzZC4tZ}}vh+LtrK+Xn7tT0Zxkkr5rP0V+qji&uf*jvIdP*fXjAm-T zM)6);+k}H7d7vx0tJ5UWG#&*Li5_!0wqeG5@w?15g{?Yb_jEoGpjB=@NaBLzYBk|${0w2s?ybuA34G_hP z5N)0wY=2a>TeokcpkkgBn1|>nd~^t8=oxrBP{SzhH{r<>oRo6Uwa<`bku&`KeX+p* z+s;+j`pTG-OEEPtutr!~S*^ac1LQ-LW@ktOESGPGiti|ElamZgOgHfbFzzrZ!9dma zL7{Qpw5xv11~PeA@r0Mq?qW1hYrwg7jMAHHq?s zsWG@#YftUUiF}ds60z-PBtW+>xB1GSr}q&#Sd673=YH^@%kqqVoJ9~NlOrgeu=LH9 
z60?*i%9(*Fp6r~Q$cDXY9G2O6c@oqo9n>3a`W@w(hll+9%9vwGWaMSc8{o=_hA|gw z`}xHMXpKOLtSl@P1gPZyB^2y-eBhU=f`WqjNPI$V#+V8i{#m<0$wGtM78ZC713m(~ z#NdbH*B*vU19CtbvbJE?jV5U&;h}0!_s*Isz1DR`)1h;TE5#SSC{yk-d{4$l*gJCO|kjKS{&){MO@vvop$90Np}n3?I%Ge4i8TAx3Ic1AcVB&5k19a1*} zGy2ohGpFD~Kn=i8+D#}injIT^2j2qy7&VTZfQ$+T-!_qOfNy5kUo3d?|dPL|sJ%<0zPEc>)TI7epcP0YO1i zhoxOpg&0kuhdxiw2qA>(GPP&*iK4d+tgK%7`N`rdK`aQB&CNf7F-?H-aOqEkZ?e&c zrfW_^<7#-w)P?C@d=cW}8#pIZ)}tU1 z{W`QG&GnVXsq~42_zoQ4=&rar%V;+U(9zQ)_lLGo)PDDe$9@T}*2gM;e)?EsC25^G z|NZJM%fht{tLnS2Hz9<>(B0Etrn9WszNXQ}D%Ujs_HD2lKjWX>k6uNaa|SC_wyy~*e^R6o{ot(-LDdgzO6=#yo~Lh_wdp(mHTL> zDl~ttD9W5=3lGXT4?3d^pjBv_ztbH$7;Ecp)%B^Vaf&N@((ZPO(E#KG1sdc(piFh5 z1Puw`+v(=;aNSXcT%}yby;)_(vbmyr^M#=#dc~5GfJzrjV|sDy!P@11>Kk7bQ|d*> zZ`~sj$D*#}ymo5QQBE{t+PMUA29`kfenz^Ir#RuusVQV)*9XuEIA7-PEV6yB!Zvl_ zW^b_0jolrzdiWceQl3Y0Z`xxCqeF!`@BA>%>rikki(7qyMWowLrF)@!#ge5UZkO-5 zpg$J9ZY_0hzPFVDW2#}SaJPyWZ?`@4VE1w0p!jZOtKR5gX5qDS=vqSR``;EQ{_q&2 z8}X5#?Ym_vS!F+Jc^(mg(9eKn419o-9d&us`76=&F?Mv+2ix0Vxq!c)ihvIr8$g1U ztv5T0e_PV=T}c9M=XKc(qTl&V{a6yyB$HVnsHy-?y#M51Ar&>j1Y?urPl<31^{zUQ zt{IU#bXXf}%fo9IbaT58-Qk-)L`U#lrg7dh_8kVwYTH$y_7ExvCS1L7BN#Var0!0A z`UoQ#P;ehRQbASXYJjy!OiWBcK>?Z%djBhqJdwJI+1c4iNo;(4vGIdss#0_`5e^s; z!3l6yj^4e7e)H!~{~=9-Li4DoD1dtu;Z`zS^pT8B%`GiYLjPWOzhiI{@oqR7m=BmB zxy)D`yd9jD$^$bO=P5{Y&tdEexU>D8bROK;NB7|1jqBIZy1hY%5akFY8d~sT=2W0J1cKj4NBWPQ_~o;dfx77l7GIrgjL0E;<4fg6$ji$O!}? z0h4~Ay@G=ZW)WsOx<%lxyio`aeD^qDTXxSdsx}VJj7l&KDt9nb^#adGZN=+4f~ z&5e&Ir>1V^ibKzj9|5fkVtYaszd1q|2;t4=nHezLbOgd#AT3itDj|+hJQu_(ktD!> zp^Q*?|M~K07=om&iH1jTKHvr5|5(d#9XkEv&PZ6w?) 
zY|vU6=Rjsg9|;WL+&PNF57szB@4(86z84y5C#NwOY~)6GvI<^#{G;*z`hPv|<+VXK zmqmT?p9hkMhi7C)^-^2eJ&==xKh)Alg+kMc!yDQ_^n+M-@J}y%`-a2L3aDRZw=SR2 zF+_yo)|DQ7`xe?I-j7l4R5R+yslnfdXJ#6s#8wVDwRwx&O#u4>vpMR!U@9&&2N=e`aU&Cnd!Vs?2N6C3ZPtMG;}XRw0yLN)qMjh*5{UVJ zAiJHNL5tsV(X*oS1MDgXJp-pFs9$fvqLckM4ymD z{X8-nyAloJlcI@26C*UUDu#V#8SexM?&PUx#4W8ggdj7_wSlcEt;?f z-vV2U<@pej27GW7u%xXU(g{F8pJf*smd1nBkqb~@w)CS%8=d1f5GSbY0b&D0lU9&c z)GXhr6CrS{yJ?v@;viJ#Egt-s?&d9R16PT$;llP1a_D&XXBMQMePug_R~b85i5 zyfwuVwuTR~p$0Ab&P>~^i!FZyYMj#vXO-9`nrKrDwIA$!$?K^oFB>GISHFLM8(WNF z58rZm^H7sOvy=RudM;1z*y32_hK&^RVg7Wj>>qu14C--?P7>7WZi%MO=Iq)$Zv#+R zn+D(nCOyTOi!qX$$)=uVM7C$Bho>JaJXT-Szvwkgzsx%m4e?Mf4(B=K`sQX`zjiH0 zHOWV_>!2_T0)@ILs1xrwi_{rkOR$nL=-K_!QsZjRlr!JZddRS?ouUs2`*a-;O*Fmu zCE+4uj(q3)gj0=x8tWNM)2TVK4F9s8TE`KaZs@yiKSzsb+(_0gT_xXsy#wdGS-fuS z9xH!t$8*r77zFl4ll*_J0GTT>bN^7~+XM5amxS#7N4p9)q-_PUWA0Yv!!D`zVsrSe z|J^5-`6Bt#(!OyB%r%|3UL9gK?`FZx>dRTRdR;bNmgL0mioD5u_%EI%e*a4ISTAqo zrs~mI+S>a!@3KFrT%>qvwpsRFn$!|!NEAAMvlGzj0Hc>WlIlNc5vcJ2UJEj@!+TVB z%rrAKRT(}iVBTM)A|>3SpOxOo{QV(y-p)^TZl8VCTeaOr^VZ_Yf4{d1kqD#wF|EV@ z&g(_X)|QqEulF_*G*nAm&Sgo$?-rN9I-!i@1GNzxtPv|rjA(VcSggNGvtDhl2Fv$# z|2$l_Q~L81K_4gl$BZMM4w^pr8oBt{!DUc1Zv3p46Q7sQx8Z#ZhQD&^m0#7@kDy@( zM;oR4`Akk;rNyGlf5(_W>M9PydVEkAU64*vQBh z6cC^_DRd+`#?)L({6u}e80>_7M!Z9S3)<}$r)capoS}A@M0NZw$lQHl8QIyDN3`oD3kieX1&N!i$a-E9#{_H!e9pnYAXUcxU)0140 z0;=O1HjJrfU%B$Y_hSbL8e_1$IQA*e3CqaLLgq>4$-?T1YSVfWQDI<}r{MxckpKm> zfL8D#k2>BxFPQKx)j(TkX=G1exsW&wdleCJ^7C z6~=K?;d-5Wk=ul)_K!nHEm9k@T#2|yD9M1fAQH~!*C&f8M>6~u?;UL{BbYiI4aluy zY{H-YjplZXKZ?ns;eaTZ*dvJ81J^Cj=m%N_`6}3udattg{rR7g%oSe|3a}S36g$se z1y$>VhFa-Z5^{Ob(zq&EE-$+eOg#`BFM^|@uRfH`j8Y;PVa3mW3d>=6U7yK7K8cG7I3J(iN zjZpZZ9zZp;jsRC7ES$3^Ssd@_1~_2gj6gpw?|?H2?JX2Xlei1_LGbaO6}1ZFNIM{1 z!fQeJ2pnmyWKBWC1WceChdn8%)ykfmnhHQuM@$2BK-!w8=Reo(QP3gya%U0i>m4eF zQXBf?^KpQ_9d&fVQ&Tyuul(R8PlFQ&ie3R_Rr|exF9wh^bQ&Az=_$h@qX4zx^waZe z9=9kxIsw!VJlk-QRC>LC`|{;OXbX6v5EynGH|n7h+rdXC?IB^88276Kw%UsNQOJ4WB?2Ua*%Ke|^J6w~#C 
z^DPjI0YT&u5x8kyKc&gL?_a}$++$y;1w{XhO*fms*I0#+b+D{xk6m3{>ebU094AgZ z)cmd(TJKW>l?6ftHRa@>KFG~?H(r6X8&VBua`Zqo2h8+)R8071R zZ!53ya57zFK=k2a@G6+~-SGm|5T*d10tv}NYYc!6ZK)z(8<)O@I+|#7{n*-2|7{z& zpua90%6o+!JC?8?;gcR7UQ@K+Wccrs?=mzou`hL^e!8N+$+Uds?_ZGC?(UKV!cO@4 z_(023(cdza+uTcisYU1akETLKwr=dJ0kR)~jU3YRa&Lu7+4BZ(*&0e&z@=8wnt9Ykxl92hI!yFa5$Z zV7(Tog?+}Nd1?{4b4X==X$5G>+FaNgznlX>;1G4DhFluQBKfudHC?F6#^!~usdr?v%kP58~+_I=)K&=ma_z;Sp%h-gl zOr&Y9m(V#<8wf^#b>qg%e*F{_6jf8%xw(#(maoxFAk_{<&I5@O%OcPzsB0lyLZl#) z_MYMffS7XO^8gG8E}&w_-H7Fhr)O}9hdlmfxzW&A6_^2Xk&=|8bJj=f5P5CjFp^VI zf$l(w0OY2Geh)s=jt+VPtgIg(Fg#KU3Ary*?NjRGt5QvUi!rVTVvP!njE#W>j+L;9 zlmw$qgyaHRT99SPz$c(n0K)^e2RJ`Ynv2Jj_?}}`q3}nEkN*ZO@IO`|05zbw^so7O z6gOvA7UlQ%<}7#P&_IeH6xvuMkUit?-!>~mmx3%KY)SlWpx0U2VUQQ$nA*w}2T0`0 z%*?TqC)cKXH&xpTvaldYPcP*zC`klt($Zc;C$KIYN+=wkz@?zjM|i5cvvWi$Od~!v zHrMOw>Iw=r3&!A{gZBeB5sq5?g6sn?(8<{uq6Q?xU4z(^iwl8}Ak^_1C`JJN0%`*r zMpH8xTxlRm79E$NMI(vn`1r_S?}uFYbD(U#aYM*Fh;sVy;pAq?7r|O}&;m{9k3EbQ z=j=mJb&8!pC!ST4HUcl+K029j^kQ-GRsw9oh!hUB$n%0BxDw@_)YWN+!YD^!*#zi2 zUt-rHIL)P{#4$iR_$`h=`*F$J5fOSg^r{$Wp8%}X)Rcdcm30Jor2Kb0eV^DuJgRTJ z0qN_rD-w+hOLDlxHg87%upiY-dYDcpH}^S~M<;AObkl+f32CYr%DCIpG76)U5n)bI zE=ufvFWPB~n(eOZauo@SX%1fIdnKR^KbXFT?n=#`-l49TWQ2M*q~E|Zt^`us*GD>? 
zv&$JaAWXx8XZ|O9&^b@Oi(z9rp=XRYB@HmDF2C{FVrm)kg?U{kyi&;S*)z(+qLwDS zP}SYGiR10w1pcmyZ#y%!;|AsLw)Mw#tvqSrtbS)dns-Vzt&1-N%pe&;rD?Nep(I2U zq+{b1x2rN6H`$lUY+<4EA7f3is2Z3p&xUTjfAk&f+sj04w;J219)w#HNn-0R!V4W1P2dQB4E?)?)3yj%L-vb zF?-N;i=-N1Nh1UTUS>Q;s7E-txQ=>x3G(n*#q!XX0|MfRLI;TCVL9WUx2=$ImX2anJ_8T#25f5iwZ1@ zz!coQf8XA^%+N^(C(zjV?V0K`zMGi2Hte)A_HS%dAF3-lk!z~DE9t~&Nl3bF;{Ki4 zJD`V?bHA6Vda9%EZOtyz7wz>rxyG+6op)b0YEju<>EmPl_}IY#LCqWiLI2<#$OHt1 zl!KG4!$l!d7kw!|KvuMTxU>P#RG`voR=Dc`b^utQUxN4Or(cg7>!$_Wc);;H1AEp+ z?&-gD?*!~G1y}m`sU#&ZJV4C?e0&gAt|HHqtr=8_{f2_-<$Zf0=1=DL+Infs)K1gk zB4HO0fE&!|XSK+)%x?9xiDn_BsluihjD!CSo0o6TzGr3k&Yf#O4G#pKf^hK>OlT$j<yNNqV=uKW^N)pv4@)sV8f)0=0&|^Q!%zD!AlBTw-(#R$*!NyMk0FpVaRu@ z;;%`)*^w34B^+pnLn82$OecV(do~pvFJ7<^L+{_W+i-)<%Fa%RS3rJhm3-1spo!4M ziFQunJkiY^;p<(rfEFG~F+^~%5fA)5+d-Y7i_AODGsDIGBk@`c&zr;{R)z0a|80CH zJfykE&ux-roFG0=bB^_-)*v_+-4P06mk1$~WzCm5SRsib+k-SIuozfM<@qcQC9 z9ru0|WnW0r0PhbJCm}2`~h;Q2tzHo{C4jXz}BdEye{(8Wflsev^y|tqkke?>IgCCPnHXI4*A+Hh9CkZH+7!!$cKrT_#DQ5HQJD$FMZj=DmmisAy@7Rnh= zObD|v&e9lcY@}?u?l|!U0Pkgka{TMi5g)-jM2Ut^M@lf)R{z3X7pXgn_lbMG61h&8 z9{_ZavL0m4FpM>@ogy%dUTc~2d5d1gE&2$9)+Gk+c>AY}TwM3E1WoL-Sz~x&s4iVP z(%f?5LnTwk?R9Z;zsaN$@X5A#kinq+YJPEGu|6KAuAvd!8Q^OjThFV#0L&|x0 zsx{)mZ#7^|y1Kd!4#Jk>sCwZBQ`){Ajv%m(D;f?ET9~UNh!VqARt3^j z`bZF0xLAr{oW1@K9}IU$PkZ~|-%@@g_TG zh<7N$p=QFF3DG3ds^Iv7cnc9eSNIE96>aejQTp21*^#;#ULMnw-57^-w zz8x4F3I21Iq8=4$h*g>TyW24~I{F&OPgLx9`E6hK6W^Yz3?p+&Tt#I7Npu80B+@Yv z(5RcMqNi-c* zisQ|jAEP1lsQ81nhhTvq5tDUxdO`Sr28kIl)hCCwh;P4s0c_2HOqq@fz5CO!wWk2f zITrVi1v3yR>nA~YfNE{TmcyWe_8Ma&M@V8L8y=O|E&pElIt*u|-SW?dfK$!f_7K<& z6(+~~?MaVlpx}+-h03={4vK%>pKU=v3zh|!Kn%ILUk2gY)o%8 zbcA3g9wJ6N*K`s6Dk&C43Jls%uv;jNo`D8}%Y6yI1+fE8J1mJ4C<%b+AT%7ju7JGL z1(1`yFF=9Y;+jBM(7x+R);L$NjZhJU0L6fms=KXi8V&rNZrmLo@iWmV03JV$a~cc| zPQkb6)=??|yJ%QEdiwNfYzoOLa_boy0sqw(9iflQhVm*1Z6=r~pN*!5k!Y>+Zr(&(I|f#WrW%p($uaK> z9BxIf3WGRN0hXih!2qqXFJCgT&{I=g0gE_0v$L}1zVfiMwFOdu4E`KcX;?(~OlSoG zO=t!Emf5jmu)Tf#c*L&VyV00o&mf!fpbQrfdia29fN^85*`7R6Y`Kh{2knu@7NCHL 
znMZD7Z|?!|;mls^c{=5ZcTg22ksn*@IQwZnu7WWEuLOUNlD-< z{LpJCy&7Izv86pF9k)yO3yQvgC!ZdN&(GFdI`y_h-2INlikpLrS37a(fB@>$oF=uTscB`S*vXx%T9fKgtAjvP7di!cB9k-YKmfNtSnW31H9f) zmq~FhRW9;Gs|_4cwY=FqCAvN<`+lb3nt z8~5<`$idj`=t!HxhqnPty`98i^c|)a&M5!aA8ysAPVQ;4zc|(A>oo4np2Xf))^}vr zQ?uhQ=VBM<*2gXi9eMkn^lo{yqwZ9PlEN+`d=-7{+#`z!P?W_ zSm^WR*+-jmII$1g*nqUdVg@||5{4^|ni7kj$vbGQBQs$Ce>uMBJvofXS28~x^Q&36 z6a&rNl~EAhd^G~jnX#GXqs>E{zXMAVR6-mv`qMvbtyjuF$*YjMD5VLCf2!ohkqn{V z3bT$s^)wR&)BR}(R{NBsWZxg(pZgY*7ECBsypL^_$F#dq-0AQVFr>O=I=Vu_?t??@ zf8NSC+E4-I&oA@(-%XwLo7ge*Jk!-E{eJOWN*ee>sjF?O1c4$7e9xwx_7A|nX#r3N z@1^6q?XE?kIqq+q3I1}4X1?TgurndqZ=gFfB=B|fiwmva7T2hH@?6QGknBm`_W0@Swg&tD5|hKLPD@fB3y;tSMe<$jezHSFTI#Ot z7MA~J03k9906gaZAl%CW;N25kAGlBr#TtzC5k6aDmvQ+2oxHQz*JshzrukrX>P z?bueo9}W9)3q=%=Go%TpnlTw!z*x^o!@1kuWLRf7+|@dFLTG^-Z@b9#`K9MPQ52Q4 zkug-FO3KPDiR!4~(p$n0L=dwe${FpN~Ouz5kI~Y&D`dE47c8-=+6H+!{79!0P&?4jX zquxr0-4*Ji$>z?w+eQ7-=s3n-NNl=mTW^bAg3)K*D!SNa# z^3Hh7FcD6_E`EoF1_LyFkb{5*&00p>+?rd$7{}H%LGsbpDCUZBQ8BIT- z@XqVl^0T$e1Gd{bzo+Fjh~6&UpYhtOF~#-x=24n;gg0jFm0Ar!Yk~EKX(|wrB7yiP zDfoq(5>Wy$%DJ|S#!&bG_9T5n($fNaEuyH2E1eVKlj7Jae^&q0Q#wC?;9)szRTmY& zv~+MsiW?){WdrA9$7Uub5EyKgR#93Ca0S`|v>njpq`p@J+k;G?$e=*W({_>;G$jfbCln50~pdb?UQx?U17PdX6 z$Ps89TiJA)6pbV#w5IyG)af8+745ConRzN{_0T*(m;kTchttiZq3)-TIPJ8Y^ZUS_ z0}6$gTGv!U^Nenl-FF^tu3WEE{U+v49q9d!KbA0*@$r3sz(@c2SE*AqX_4~^a?`yP zKj+_>qAvf_#{Er!Tu97Q%VKIS9*2i~#X2&EL7At&kL(KfEoCQL2n;WRv*F(QuTv6S zaZp9W;zN<@CgD^Kqyg{cHjePZe^<~}VMPW-hT_Pm+pN5?;S`rGZ=TG#|MA#nOSW>p zqaST3AoN6Kb7>PKb&{_t<6eo%nQgZx3b%t&`mYHMS|*y!ZN?)YwzR^6`I zE{~V@d|Z1{SJys=o%!;ycca18 zMKITZ^veDN$)*q=D9nO$zsxU7M|eb@R50{23=vGO7P3bdv)R@TpX<1sZ5UiR)s_B# zJt)%5hhDORm-r**leQ$s}s` zMZES{`%-@BYi;?>o8tf$rq#IqTLMv%8@~E&!%RSpr{UX#H(E(%zbP3yapZvv8u8`c zRQ`ip?L9rHTS5!?i&O4WSKvgvquwVl^K2h~2>+s>RglBLounK!&QYE5Wz;)h(JP~^0UWa?lX?ynV%fx>| zZ!(2#bIYd^+uE|_)vukFSBJL;2N?v552>%@_BWZ{exd#+AoSgDe&T`Yvvt8@ zkMLLFn=yvcqGfDi!UgCFnl>mfz;LUIik|P%KcsP|1;sf4zr6ol`kYgz7LT(Bu?K_& 
z(4M3%u$_7RV&BcaY5q^gUtX8s&(c|HHqmcV#27!+&^XzWJ3m6|iX#N3aZm$)0i1zn zL0z-u(XccN9Xi%K+#CKoICA7FW?#CYsleDax0S7V&-AtjW!cCi$9@BV;b+{gIX;yB?Y{9>-cx?Ths*TsZ}J`f``#_ zZ8d>kj-1=|&z(aS03#W5hlYEaUh9b*a!|6?D6zb=^}Wn-rhC-*QI!asbVspoSyY|z zTPn2!3Tf!A>m^v$IpK8w&+T&BDOEy!le+p2#AF_OWm9a^>0x!*sOj6a+Do<5+`Ys{ zcf5Gf?B_?g5)!7ckO$?j%x^-T36QYUVP+Kwex{7O@2#@p~{GH`^4gA5z z9lrUHK%+^W0ls8qb*-YJ0+L*$yFGraZhj;owK*KBEA6TiyK-wW)Y^i_TurZZl(zon%%267Z9S^4S5XoPQp>DtDLjY)LO;CS^Nrk&9n}CIfS%z# zYi!KxQ&HH}j#g_G@S<9#SepGx zGuWNci};b05;vRx(Nrnl3pAzp=(~igSc#8G)-NA+%@E7Ga%$h%*R1!p5=E}3oaA+? z(O3_Lb^LimNj)kdEgwqW%&@zZbTpe(KQK6@AP@ z!0_QQ0VUGc;kRc$#&Kk5hiUy8GnGX6(U=6&0C!Do+?`9E z<16+Y`peN%&g+-Q;bMWN^52DcM?AET<$&be$g3LZrq%HkI!*^e-wx^E(rKpnjX%Kl z1NBO}==xt6o_`IlzG;EZ+i^O=Z9P461NXDRAOoU+KPA?rhiTkKH>jhr(Fuu0K#o9V z`T$%ZR;_=Ea;#5gOV;(e(Du<6IZ7XbjSed{IX~q4^8Q@_=<=fmw<2z?Yp10jQ}j^c z6Uc=01lBs}SVAEz0L=ey9XfDHUAk-dt>O^ERxTUJaB<(KTuj9t!}1*+M*zpmu7_<0W2G>y&t8~?W1G%I zA80j(uwApw;OFPwl#ZhWLW~ChMl_kYA+aLy3)mO_IlxZEz+Yhj07VKu`V_h)oUZZw zDqq2Q1H=>B?#YwT3f(Pfdg%?!KW``{;;-@j;05*myHX=h4wg*&`vT-dC3{7C_%Kzw zP=E6H=HZLXpVt!rte7`!2sP=IsofIxbq&lrZU=xnr-1K31Pn#aks}{aW}uiyeL!kt zu3f_@3$Q!i;rk=#7#JWK7AIikK)y8QS)?0t2?zwkgMG*Is<6>3q+t{>zbPaQ$sp!V z!`z&tbu>NvdfU_w!0oW>f5UM;YC6*DLPtQ-6ctrsT~4k6#F^k5YGINk$Ovr(Kl*|| z((tZxb;d}FPxFzsIPF(~ILji02le750RRm8_37g%MnV8*$E<4P98DKRucIb%5I}e` z2(_qXQSWcD;ZXX+`w!lw_tz+`*?G1s)$Hf)$$C3J!y6Ala$~0KHw*zr@dyP+D`sWf zOBVTLBD4F_;^pl2M`XhWT2LxmK)=!Sy{Q1|YScNM!C$t!?okOL2%UF(^mm#^=BtCYfu3pQDHR_d%1({XW{C-u@@;Ce^kg~DbegG7OSmutDVC_x(;>e+kgKF$_vDO zqkV;HyhqIcvW_qksL&waJnd9Lrk-|r2Luem^8xBePJdqoL_^Rk`{D&d`Sr?ODU~xm z+N|=$W5XpYjJV30N0`=ASj9R@_EYCt76Cm$&vqU4*5Ik!l+F0yuEby|uw7LC(sU0a zXABBoF+=9B5dQe&uv=r$qw_iD2k^Z*2)3GAWn0X8uNoJI1|QkLj_>`yQ!4(S-`hhJ z11wfxJcML-3CH~Z*8`K164SCu6lwf$>V{bb9(iucb%Ko*tGL%)`fg6fGdCj}N$ET+JlLWT z_o8!xa&8##AD*Th)BCiMb;6gCTjawrW+xT1KBmulvQmuvG#n#AB=kriBzyt!tmKX5 z{EU(K26$FFTHwu^*`n~up%HKQF%U8IjEouqUj;C}*u%?+YPV-Q?Yx`yuc{eA3l5__ 
zBI?#KfYTD-XE1#8dnV3wED_NB)JYfmP2cdjFN`X2teS`yw$ZM{x(?#{#WshmheSe4}qhFh6X}2 z1cdiclH+!TRJ{q4tDPT_d4ZD8cvc_Fz(}f|yAQk#vMp4_V<7q${(|5TKyEUfh?B=iM|BMijN_42R}X?#3939WE;Sg&#c}CmupD+oN*^}L zQt(sA%{cn>;prhHO*bbR`om`i=^X44kE^PTF)NH96ODmdlW`E>N^#f{azMX;3Xlj) zSJy1$mPGydcW9>IFAtb~^ZnH!d+6X~>}nq3uR&jj0qYa!X%QHSj5S;qP~(^&au+Hg zAbOZxp@Pua@hv7eL8Pel=b`k87J9M$-r+~G9IB@do!+>Gw@)83Lbe#H)+6M32ciH( zAUFxk2LJ?bT3{BVp9f?K$~c@CGz6|36~uE8K%T5BJ=4jY%zNnVAe@0Ea2iwuK7t3V zc9RMF-XkNFG=e}YR3A=(_VU@+rPi0(-KD0I1>uH*Pd`P2zs5z$!B}8Hevq#miay(4 z4h*P(7VPiOfT_|@3`$V8O4z`z8Wkv{8{6CebMQ)QW#6|f!jD~qaD91sSzT2X)Ho7^ zp-359xnwN9jv)2mbVNOdxZ@*XgPrpC*TB``;E-c*bBV_~|FmeN8GT#65qbz5XT65l z2M;SNZ?VhJ9izIj?U^{285mm}M&Axqu@bnD0#Kz9ZCzag zat=DUl3-n>Bw!e&13ixM?+L9?rF;OULtyLYEMn?2)VgG%FiK8fiJHOwavR7MJX0U= z0iFf~w73SS!!{VS+KP)>-x*Fvfy)(kRz%Fyf)K$iXCkOviq zFON@!j=j67$r%}rgwEtGZAP_~10iS-F!BxyljMsq@nqF{F!gR!PSM8f9#j}_u$zbx zND27i!0UcuYAyD~4@hQCP7XR7fD^zeO05_PygH-_^z|vA()l+Ekuuy^ytAw8VQDEV z9TkYYkK8$!$>r+{{W*+8_#Y54!u#{R5bQ91TNzB_z<>#qJSHZOu%YDTmjaJxK2_u} zDr^XN3d-qD43d;}XxSM?wx72tu6+0qi>US7t|&*iywERzd&8oVlhdJS>6tD+;pW!L zw9UoKD}Fo)cnJ>TYcz_8k*xPZN`i8{o%c=l5bwXohSPPWHo6#=q8K3@TwN_-&d)ib zO3ktd)fP~V0UQ;*<=0n^Uir248XJG|J@a1cg57C_-u=@Tj*pAqB-hvak;6zqW*~rL z073CTAIad+k8SR4NTGVfD6;riCiR&is$I8aZWvGfOehX^eIyuZ_Di_P8gjlDhWT5p z=){zu0dlam6lS@yNhtYCYnS-v=3GfZESrvt;gbn?6teCVZv!Sx-*_xvdnRIIK*PTG zj#p&F*OUo;m?TV%Xa^=xfHRbQ8vo6-XVePSS{$jK2dDZb6VzqE~w zbnSb+c|e+2+~3wM&>T?5DI+z(glsz9TyHq7a55ko3NvdV5;;WrO0Lx2NwpiCkDPWu zEo)#9h2bRzf6kvDWWH2Eb)H@}be{Z7aBDKJ|b^B#@dDWkQU=cE3hgHbLol4*I z^|299OG3-H=h7~uaO`s4XqAST6v}fW*y)@72MWkXFX~SPB>XZ7+M{wN1<~M8>ef~d zlLYkLo@N^{Zjvd+ry6H-=72s`gR%KaVj?R$d&J${9CEnw#zn@m(OAII@4g;s9hOCC z7IbwfV=iYJ$u+0qTQJt)r($8)yX{8qQL27CLx81=-kq zp{|DF1$>1fBPnOW#{ky*3xeT-X!-`Ty9|hTa2-Sd!wDQH6DA$45r0)-;7+^&s~o&b zES#JzW%m%b9D!y8Sy;8WC_Bpn^^0DO#k^u;{8~@i);{CLvQE=PtxbTypMnkmC<1tL z$jFjxWnu6HKaEF48IE48T;?61N{E3QY7E-qNg@h4D(^^|!6Lp3!uMcNDkQq$81VUn;; zJAp83d{yf-ySh+K+2u_Eexg%b=3s 
zeZy=}JRn5Zpg;0Nd@P_kJdiC72*&YY&Bu5_OG_?-lnA0ZMq6{F(i8K;Gcp(=b%B}? zh_nSEPdGR@T*=DXK!6v{-?Jw-(4H8<+eU!|@E9Y6pF!{N2Ks4S*2qXeP&_?>Xb2;% z#=TQkk|_a=Tzq=Sm70OpCPp}K0;a1}qgU7lsC}rZsbM@Ioh#=a@Uv- ztVN`UkysJ}=#FBE+4i%kQd}UF-p=TGpfj~%@DR>hk^dxx0@yA(~MQzt0P?BW8qo} zgO~#9bcFofXdTB4rX#XfL8jltpSKVB^$H$CzzwUoaN&qUQ-l4d0&zREJBo@2k7 z#x`<|p(iUj^2El9rTR93I`)k#!nK>>6Gf{6mCWJ3n5a(`9g#}9NH9QQD9KS}QV7k3 zm5gPfIlxksd>EhwTm2o;&N4`#P;V0sE-uY-$W%Dl>{J>N5n)t74yY$m+}WUE1*%$L zw&(oVh!1-HRHM|aEU#n75C(#q8g}LC>K!EN4PFy0c-dYLkVya1i={TWr@07#^!|k` z+jP?jp0UR@U1}0zv>Jd4#&LyepP6_0m9{Hw-1@oQ3(B?kMNiudHpLv6`@k=)E8&+} zYLm2HDn<2V^5KI??i^^ib655jK4Up0>FZgF%H~xC`*W^)x*~e9E=d>o1ru!*meSO( zHZmpL8UOr4)}GfZYlw$kj9uZTZv6C6&KTLVtu#G;#0zG9qjemiHrq}H2ay~K}#n(Z*3j-ufy%h zEaTL_U=!DpGj3y*WUv^1P>Zzr_gojHKHyf+aq&6F!Q-WdZ`NpzTwr7TR=TUjL#B?; zY+rux)7nnmX7_NRP0@4}T<4s}e7M&ft>g;K)(d=Q4&ea}7PYORIgl&sd`Y}$B4 zQM;6eV8UYxOX~`eFjRJ_@7+_%C_*eDG+KiW1c%RPcB);WNyxWSO)d=^`RF)6|91QDl86+3Ba6&msP+^;nt9s zkZ=Pch1(9!64b_zF`2?x{KwCqLs+#aTLm_51V)dukPLSOZjI}vrm70(-__Cn{lmN+ zZo6BoqHVR@j;rk@^Z#Wuz`sQL$9H{0y#dBf)nvaU@ew8{NvV#4}+*i*Oy|^k1}bV_j@9jXy>3n zyr-|d?~e(^hg^G{A#k}5VV#uJ9yB`ox-YBEE)@Y5Yd?JmR=oNHyDkasR6w3DO5zac zot`BKg(0sAp^%b~6KTn17s+N5-BQ))bEtfDS}SGVVrA%PUQ8yg2nlxfK@Z?wH?26N>DyHjvCc+G|kSAS|D0!-n|yc=5PIG*C!9 z!4*XI)d-p^wtM^=EhhdA$V^iW#Tc7#pdy#$M8kP%&?KXzpijCqWy+x2bO3;Sk_mJDds@}CO-82>gSDIkvpE~0*(Kk?tDF) zH$KdDeT8~0;RpP1MB`v5-FV3F!FIv1tfkLw?}0}ex%O%(7c0andb0H1=97&re5 zUV-Ep3-A;rB_(1tDG7XTNUU&o9P7=CL>GX)2QH_>S0JsxoO}9d$gaHd&mX`K!KDwo z(sM2)Ruvh}-gszRgtUUMxndknJCkqZZD6CZ`gur+DwT=b-!^3Tr)_HNQFK37I47bB zs&aEjp~D_ze!Ikckd1jN>CjClubu73blDcsy-96yn1WBuWHHhL9sydT&gIcgq1hOeDPXePFf+I_ndbsO!N|K+2~o$EWJ zd*os_c0bf<2$Vhiz*$J;qC$~;!C7ER8;x8=d`%oNsq5_@sHEMjf8!zs^9z?%R~x3b z{r&(sa%UODJsN`KD5JXn>|WO7eDYt&AF@i*L|hVvupr3fkbQ`&t1Fp)gBJ`q16Kzg zyuqFQF_FbBBO?yK-@g`>KZ;e1(LZ}NRB?|YI1nP%`!*FQYf)kSSz0QUd5oqRvT~H1 z_(u3$Zfjclt3B`IbIcFDVg9A(!VvZDMaPi}k5o9TU_?U^2{rSfVw*^N{F68vpwA@h zXu=ycGG09bB_uI!5swavXLLxww_z0)Q1(cHxy{&^kwZD{wqoTQ{9D*`zacEUR=P_; 
zsua@&amFIT0Dz3Dsw#3kJ(2dIrL_ur4A(C5zc9?Xqr(Gd1dO7Xapa(*LsZLlJkBZB zD~x*JT53l9xCR7YgBIUEN0qH20pJ9i5s}&B*+|R>DDQ<5pO%RauX&%4Q1!%W$6S$_!(I|X|?5Ruz*I0Z(*Wq+&0+WnRjYn+N-{`HnO;~sY=_i2`CQbLyndm9O_I5<4OH@b7@ z4p|F=VI(!l$ycwsoj9S1<$_@V@*KPGfsxV!J|h4Ne`9C*+3oL-H3jm z?xwG6Oc}CqVK^!R`v;l=%ysc{b&Wjo9AjuPspBt>L$dHiKY_3jX!E}eRrf!8CN3xU z0IoRGeVg{Bi1>e~{-ZOMIj9hOx=GF$O%!hw>M&&e&!LLOD@Q^Ivb{Xu{=3=-2;=FHnQ3wkQgUA%uNbRP%yLyR= zz1Tn)_*8iD;%@vuKw+h&?_ym5!r;4n3oiw~-PzM~AK~;^a%VI^fTfyC#-WR2J@g^|O#Q<+sNDHoJ!h_7i+TLODhaw83A zIZBk0_itHB78n&De_AlKa3iSpLS}%{=PKto=qbcrizc+C2@`yWM%i`MkMnqguWfyZ)ms({lz*;N^Dm-E5U%sbu(5oR*A0eBB5rr z12!OSaK_zn-#o{fEcg?*6s)?4-}8X+bW%<#MdtHlyvEZleljYWTSEFq_Qdk++Shtr zd85bI>ax~b3C~v*mkk(8JoEu74 zRDSz#3}di3 z0&VZqV>$J~L(*gVO7O^rsRmjm<+Ns-W&_DrTKhbA5(5a#`3Vm)L<86~X#5r%K#ab- zY##ouQ`U3W7;UOSYSxud?-x5uP4AgX(d-jQ+0v=0^!tj;p=a0RPX9h_YG^S&jK2-JRJ&TBo3of?(8H<1Dw(rCIT21 zqWMj85?M%77(m=1uy*^$r;Be;v95mRq1$zafpYgUeL^f(`s$O-T*tO%0anJNLYmfc zOTiQ4G}o;%GUp>AaMzN41oYdp!*xOg#PO*7fpL(B0!S@%)cBwmGx=GHgFvXDv7mqI$GeixF zS~r?`d9C5iTTlPCbv)gpf5yTD-uZaF3&A z+B&%Y5&gNp`#||?YCc(62_Bg}7vtAJx~>2H0zX7eKxqlP2_U*Z$S-+U?ROjCr*5vW zb7Biev+|iXnXTy_^?$eT(Rc4V^RH~hVnR^a8Q^7Ly?~2+!3ZeS{&y^k+GR7*onQx| zivzZ)hjs+vj9%SKNHziG09CvO= z*c>}`qPNyn<>%EwQ5$+EJ{6IxMUTwvC68ZUROD)*rgF5RIeIN!v1cwpVnZv%-j`HS zbIZ*9BVFmo+DwLjX!I;;sh9{MKjPLcaX#{tCPol)$v}< z7xEAk2&)jrZIZtb<~}ub+f))K7kXpM5?i#f5Xb~ApAyxK671T>RC+C~K&qX&l%gK> zE3SsFql)h@CVy3x(cmN?$3Wg#&24odc`RDAcratVO?%tB<3hZ1V&+p5-RNHThZPln zKcR769E(H63>W5yeVOiVZjfT4uv$+vnq;iiqdtb}8E!!QLhKx-4B@j(|2DhikW3Th z$oH9FyJt0c>{5&pSJ?602I0~=ql zM?fup=3W%6dCqiOU$hrsHAZR!PKJir!ra{d``E5XU3q}$KtX|{U^+I0*g(@O^YdLG zxq+nT0t;eOk*JT84@M-d1c>0VGIs7fXHyKyRqo+OJ0z@PJ6%}MKJB>jU8WO$(yiOJ z^>lV_5fhsTxV2j*&HFYI8X%2EjGVr{w}3=)``6k2qdW$;`vIRW0LvFmj1GMZjs;Qg zdWe>Ry${tI=0AZ7Y@%Sb*O;TH3^3$+C*b*Osq82+~<+-MGgm-#EXr-OPU2At;C&6HiO4EMzL> z(|?*3ZfuRo3LKmB@=|R*uePaHaQ5qhuLMujnXlDTz&7(Oi=cMkw(Y%DAnWplMZ@Kw4&bE7=RlEefTl)U(V zpCPhoXy`^#5;@}qM5Ow*ZFALAS#dEjONinZ4F1;8Di|~iuLhnKQGe3#N?O|Q23jGs 
zcL?>FLJm+S_~p(rP_sBuAw(rpd+JtyZDWz4Q3Y25lO(t%$R)DRek#N$(wdTG%esR# zi{bz*03wt05^!Q;?vuj}eUxgH-YAU`V~#@h5|6y7oZOWSeCQmjYijzGXdC(j99iGb zxN0@Yi6&2*_KHN7>x!qQzD}d5pTdCo#!^CD9DnCqWhGln>5$K#I*{i! z(=Q~knaM98pz6>mr~Bzw^}`IS=Bop8=c@1C{WN+}NOsGXWeA&azoQWa<>G?lHPvY8 z$-YHj#HNnkRa0Ag0D264v$BE$9ET~c)@pfgzpt^Z*k{2ti^m5#z|z8EqgF8TQ#5~c zDU$fHHyAgFnBj3j$mYbv1a41Uj(uN}$yu&1U%JSLApkrpc)lGP(I`ip;smjE>sI`w z!>F*ZL9~`8lJWd?-i1pMh*-=mDd~2Q1HQEIA&6P+`0+QTx;M1LLP4A`>8lwGzOq-* zR`o_@hL;3~CTX<<(irr6&~1~bD2$WS$})c+E-?fnZ(n<(T2Y&4OC_t{rqetDF7I5k z7qr(#E0~Wr9yjwa+aHuAsC+7g>A3TEduielulA}==xPw=@;>$b_-pO&@JdI=bKq>9 z-GW+PH{xtRjMklKUt2NZ(5cZc3wx1M+tHD5{(MK#U9I_rZ7`WpRo(WBp~@AEn3Fnz zz#W=(e>;zHRlO>Y&dL4_SqG{gVC3y{zx!_u50$%_AZ{f&xgTnk{dCp9Rnd8Oc64Z= zpM)xz1_8#-97J6`osQ^WJnx3|I96xY~qZq+aw8&ldhSy+m8;}Uu-Md?}L;u9k{W&@O_x8TG+tXP?-!4o{ zJmG#Fb7wiRviIlyEjtZz`fvX@j-wWDc<237XVlzVm%kNrQL+ zUZjJZJbAy@-Rnr6r?rTz)dOL5^^E&0?RJa_S1Lp4?e0>oeG3>kV&{7_PCLe-PUp{P zgoC}_Y9`$phv4w()!%MbD^eQsUmeBszP7Gu=07dtj)veih(0>v9Z!y zwtVJ^Q}To54NFev(V>AEf3(IwZ{q=@%(BPe2?7di7gyI(8H7*n%G`g*(vpT~4B2_C zorXYJ;>-i*+lvw>LYOV))923=!bs=49PgMdU?v5%oq0yovs^?l#_KJW9~&;8u@Bu)q@ z(l#|=^CpRDUKcxSZvGQn(A*2bnd#}nhYw>N+1lD7V{C zYQ0%- zn?*L;*oB>bic?AX=9uWX>?9RwxK+SoW!YECYtO1|is0a1t!aPNa$m8G-_gudvzj+U zQKVeqpR|(Oqyt&m@U?kyKw&9@j2)*W0xn&85aqMpViTIc?~odq5npg3zf{$}F?vAM zrq-uKciIha?V5Bi4_u!sPT_ZG`x!4eZB`_panl)q+z2=zc#&axLd&0vt?qM{{JDzw z#H&+zc-vx)MS0+A@{3uy0-g}1R{hRhbm2sBd6w#}pL3MP;etX9%P~-Eq?n7p zt?QDsjIZ0|A~|WJ-h}I1${}A@*?`xj7|3EnMeuyiw@kF6evwR1xTjE^wefmRygqN% ztUOSIUy^0x5>|?|YshyVXv91Fvrfr5#PlA)wt6z#c27V~<~b@mlH0WB`5C5tyg4DZ zau*AD+*TF1Hh!i?glxQFz+PH%A+x3!U$1OadIDQ{O{P2V9{s6^jQIAWvjzD34EJ-= zl1_g-=yJ&C{~Lsa#%Is=@`UVY)sI&9TkZP0P(~krkprLbW-zQgo#1 z%kJ^0&m@&MLBA?8pB0Z{Tr6+9(ax>?zsqkTsL?#okN?_UHr@~)ya~5#_Frq3zvF~O z?@&2B^qJ1y2)UPWtMPd-s|{E=J;uvg)n)c1^?L6dW*rkB#k9QX>6< zpyXU|rcRhZTPO48%{r%-d^b%6DU`gPshB+t=4Rxht9!1xFoF^p&$&#=D$+)l5?(FG%Vx35f@ZHZ#%&@Q=;u<>srleC1A@$AJf3o^uAWI-!otP{nqi>fL~@sar^8HHKaHg 
zI?wm_S`t`OML9Fv8Uj)DV*Y8~0dlL+YiPYBI0fmN+g?05g)Rlf#ly8h`z+%(UO!;4 z6BUC8P%Lz0n22DbeT0;h6f$o`E=Q#d#qA;cYlpYz$Y+1|0UfCl7+0KFFccj7oRr`` zu#DOdOwQcqk;@Lnu^I8l1lwPI0P~9oL@XB%9QhesJsbfE8!q%b)6J2=Ka$rV==JFb zuMCj1j$2w9Qre+@*WAoXET2)mcdc<)T-Q#A(%#l)CVvf!{t49zGM6UOQ&onni|W`L z7ntna=Np)}LdI=wm?;9$LDzLkz`QsLpWf5Hn1C{diRd>IA$0ZhCd~X4ODnOh#!1XL zNh>?U*ht9lX$2VpuvOihg(H=j(3!B1;UBsE$b$S|585Q0xn;I-SWCgk3LJ|Lk=5$e z!JL|yjla+N4(p(!pQoRNvX`>4ZS4&6PcH}YuQ72;TQlL}s*_yKLXwpzk%j66n`$~u z4vTV9c+FeDS`2pcwNWpwkN9Bgk9~AO+X|LIAJ3Ialh?KVrVBy~C{jc1kr}l84 zuY&{~J=Db-$eLK*@I;kukRBYNK4E6yHZ?(FW?Lb{6HIRL-nf`Usx%mAaF4Vfv2 z+aZBf){Kw>wKF|hP5OHUx2efVFt6tnzO$nuz!UiC5BA%KR0Kpb-@Cr2p!AKq&8X}dHYF5X7 zs>cC{OANkaFnrZaixlr}YBI#A_IpyZ)OU0f0iSEqXN+igIPIDhT7g9*c=nPXKt--+ z09?a?)1yBlocvGS($Ff@o7=Lqq4uDb;wU5yeV;B#tXVg$VlF_i(6`qKW`m1DN!ts^2Ce$#Cb{Spj5LE)`ef7w8NO7pMU&UcQ@dx?&0Aq zQ&DE}aUDK#1PcMx3jkzJ4i1t+jhYiK!RQ*m%Gay!;fujRJIO1sqcga399oH&pnXV> z;9C@}5@21V(o0guRFUzvw6uWVprWh{jaWPRYb0+1%CM;PWnY~a{V+LsJu535B|8S5 zrWS*V_@pG@lJ!;1YFP`=+XZxaOIAKRw9w>OKr zAUWwzlaf3APzr7f06h~?@g^qLKo$ryb>N8-5%xmAJA)QE^Y7kPD@}P*#7n1KPLUih z+VMcsl{>!G+X-y7Pyymo+j~}lqtZwtCh0}n^KaGgq84k*`9m4>esm%kJEj2MV5$kc zXxJ8Uv4C^;9qMLqqsi@Z2QKS&k1?6rre)7Yy5qbl!>GmzAKg9XjqH5eog>dD*e4b^ z>1?NMD|?fX&4h9u6;Bz1%K_*a`6d~<_1b8w!mM$v1W=W5GK3Xfucf7-ZH*4cEj9lp zPnC7llN*x>pfY5s`LpzYe)a6xP9>%DSYYeR&Dv~K6iJ>cAOhRvF}4W`R+g2Ww6p}j zC`}6xa@~s;FMx1?X%EIf;S0tI(9wqvl~yLLc3s?AoBG3|+P6g?503{{vM#3uh=t%f zW2s*c-}z?t>eZ{v>(`M4pk@Za@1>KwyZd>-a*K;_QU=LR^flwl)ygv9kjJc{47q8H+v1Fb|GwpFTAV ztX2ymFZPdK%4&G6_j6-S=2wOA^`{@dolN(rRQC!97i!_d?-OPDIMiP~Pw6uX*Z**B zq9og(buBG|KL&$B<#n8AGW)jZpUdbgVYelOfY7mt?T!*=ReP-uTTv0@7b71WwCoIZRGY-U|KFhd~d$f$tZ$ zY}rz#m&>3zoxTX{R5=QF*EHY5+xza*lV{JiRlc>y4ztLZ9oA(#UGClmfD?!`?d^VL z<@Om{LIS}Ro~yaJjgkTr59Z?-9iLtA?%JDSLf1<#;`Ym}iCfax>EWW1i9{555#X!5 zygUG!Y*g{kg)0yY_x7HJEa6?VEN-vwJBV{wXUG45i)I>HWtiHed3qkicp;!^h>)EP zp11=d7@Qu!0dj@-9F8b-T;8k7tE#3qO+mR+C{_(D8GJ>wubwF;f$anMX86+l(Xd|g zVi*HfzUk{DMY@pt!k9g5EcpNiWWhK?41~(x+V_v)9_9T|d%>y$6XVv|Pw}(jk(U>9 
zDStxHitg)6b919cADnwmf#TcFXmF!mzWg**=*^ z3H$W&Wggvl-5jfjPV7`Cbb){sM_-0a89AZ15CcCcI8z*D!>mMp}QyFh36r5BN#8bEtt&#EO(e~J6NuO&Jblm<*f7?_yTz|K3!ZX;^X0A#k%uGOIy|y z*iLJce}OOIHabn1`-CnGK_cE$Q%7uG_V@K+|Ahg01Hyxz5la*mo^f~Yx`3$g^y$-7 zt(Dyb_A~A_vkrKyCr(fZP|_K8wro_HwEMZaf8$(AO-KmBWD5>O|D9zV`_LQ?wBFhFF*4Hgy=fy#mh)P8)14)#~_4!Q=;Bo&C)c|%5&B`T-1 z^hzu*p{l0Fm|+F44y!n9c{C1fCQMr8VTP=z$Q#n*U>bb$RBHBHSh?;K4ABd{%&6_j zhL{XTn8T1fNPY-db2Ard+jIxGu$D=YHT2yOMb4ZNBYZ`Tn136qx_|#ZSmI`xD#^31 zI;$$Vt6Wq1POsz7@p8>rP=tjK2jojeL+TJb^eu#HiioQ32+oQc2=|i^pQ#Gn=tRmG z)-u)&YM|kz%dw8e#w}QsOgb`YmZwfVl7;yi913e`49S&Aj1u2D{_{DzCBSRIWN;pj zU)Zaeq+NLU&dLYv^+VtU&Q#*Y>>25t4@HVsusvA>;sr6fbI{KLWDA=f%>xGlu)VR` zZ(>>jkPd^QYgL3g8qj7InJDki)|qM z= zJe*`oQtfZp$}fHSBJJhjrzr4oS1X>haonwQWlmGQA7AB)=YG1YDLFpw%&;mzCkd6= z4cN3}R-xtfftQ|x)%&HSoM2|u$l{=1lEx3)+FvU~9G926=L{I%Nt7s~M2Gol#nQJW zjf%qnrXEa8bBJFT`0?e(D#TRGV$6R0*yhs$`IrX}9-zI7@@81ZJTR=vz{Zf~A@=mZ z@y*j3A;V;CxT~d5M3ZNliIF*q!zUoJ{{DM~vaf31z*n}QnAC1MJOBBn#Q0}nErGAk zmQ^UhIe#(Gw6e7P9m5I-PF-6F ztnL-zymE_cAMc`E;|@=@km^Qb&!rFdr!l0UZM17&4x)EHc%CfgghgEi$Bd3tBY1Lm&|n+$Uv{GO@0no^^nx$kEXOy%g>lYcCJRGaf1$BRG4wgq7pRR7yY}nl%aCtMfP(=%Y_N#I z#jCEaR^mEgZaxYn1Dt1AnTQOexiDr02vVmMvwsW^*I=1gd(wgYdV@#3w z!u^xjgF#_)bKY>4uro1dVoBaQGGAOZESfH{ zzl6^Oa286Kk@VH~@80$H_k$@8+6oDh*KJwuxVW^dvj@fbN-nOvtGioFoqn&VXciv~ zY87(?4*UCElUQ585QAHhbAxM){f=B0jpgi)37lV`fe^^))l+RPEMUKlv7j(SyGMPt zCr`pK0ClIo|2!7PQR$97#kHFVWZGp-wiTYI?d?Y~L5ye*O$HZ4M$)nk`HZqzsp<4Z zs|P!QJr(@F;e!9h76#Xuf3w>sc{_Qyg&>H7bOS58o{yK8vp1cPIOJvT>MY^v?CpHs zp6=`<;SeNYWNIN{MDy{d`#YX@^QBAdc5=Sx=IE^H>+N;kKHx`=*47FP9umYM15^DX I-E)!u1>(1|!vFvP literal 0 HcmV?d00001 diff --git a/doc/tc.md b/doc/tc.md new file mode 100644 index 000000000..270ecd9c4 --- /dev/null +++ b/doc/tc.md @@ -0,0 +1,862 @@ +DPVS Traffic Control (TC) +------ + +* [Concepts](#concepts) + - [Qsch Objects](#qsch) + - [Cls Objects](#cls) +* [Steps to use DPVS TC](#usage) +* [Examples](#examples) + - [Example 1. Device traffic shaping (Egress)](#example1) + - [Example 2. 
Traffic classification and flow control (Egress)](#example2) + - [Example 3. Access control with TC (Ingress)](#example3) + - [Example 4. Traffic policing for services (Ingress)](#example4) + + + +# Concepts + +DPVS TC derives from [Linux Traffic Control](https://tldp.org/HOWTO/Traffic-Control-HOWTO/index.html), which encompasses the sets of mechanisms and operations by which packets are queued for transmission/reception on a network interface. The operations include enqueuing, policing, classifying, scheduling, shaping and dropping. + +- Policing: the mechanism by which traffic can be limited. Policing is most frequently used on the network border to ensure that a peer is not consuming more than its allocated bandwidth. +- Classifying: the mechanism by which packets are separated for different treatment, possibly different output queues. +- Scheduling: the mechanism by which packets are arranged (or rearranged) between input and output of a particular queue. +- Shaping: the mechanism by which packets are delayed before transmission in an output queue to meet a desired output rate. +- Dropping: the mechanism by which a packet is discarded entirely. + +> Note: Refer to [Traditional Elements of Traffic Control](https://tldp.org/HOWTO/Traffic-Control-HOWTO/elements.html#e-shaping) for more details. + +DPVS implements the above-mentioned traffic control mechanisms with two kinds of TC objects -- **QSch** and **Cls**. The framework of DPVS TC is per-lcore to avoid performance loss caused by multi-thread racing conditions. Each Qsch must be either a root Qsch or a child Qsch of an existing Qsch, and the root Qsch must be attached to a DPVS device. DPVS TC supports the following device types: + +* physical devices +* bonding devices +* vlan devices + +Tunnel devices are not supported now. 
Both egress traffic and ingress traffic are supported, but it should be noted that ingress traffic can hardly be restricted because it is mainly determined by the peer end of the network communication.
+
+Basically, **Qsch** consists of one or more queues, a set of operations for the queues, and some traffic statistics members. A Qsch can be installed into a DPVS interface device as the root Qsch, or into an existing Qsch as its child Qsch. The root Qsch receives all incoming/outgoing traffic from its attached device, while a non-root Qsch can only get traffic from its parent Qsch. Qsch can be divided into two distinctly different types -- the *Egress Qsch* and the *Ingress Qsch* -- to perform traffic control strategies for packets transmitted out of or received into DPVS devices, respectively.
+
+**Cls** is a classifier, which consists of a set of operations to classify traffic into different Qsch. A Cls must be attached to an existing Qsch, and it determines the fate of the matched packets from the Qsch. DPVS supports two classification target actions: drop, or enqueue into a child Qsch. Classifying into a non-child Qsch (e.g. a grandchild Qsch) is not allowed. As a consideration of performance, the packets are only enqueued into the last matched Qsch, rather than falling through every matched Qsch. It means that if Qsch are configured hierarchically, and a network packet matches all the Cls from root Qsch to leaf Qsch, then the packet is enqueued to the leaf Qsch directly without going through every Qsch one after another. Each Cls can only process a specified packet type. DPVS Cls supports three packet types -- IPv4, IPv6, and Vlan. Priority is supported by Cls. If two or more Cls are attached to a Qsch, and they all match the packet from the Qsch, then the classifying result of the Cls with the highest priority is adopted.
+
+<a id='qsch'/>
+
+## Qsch Objects
+
+DPVS TC implements four Qsch objects -- pfifo, bfifo, pfifo_fast, and tbf. 
In principle, they are almost the same as the counterparts of Linux TC.
+
+- **pfifo**, **bfifo**
+
+`FIFO` means "First-In, First-Out"; it simply transmits packets as soon as it can after receiving and queuing them in the same order as packets are received.
+
+![fifo qsch](pics/tc/fifo-qsch.png)
+
+A real FIFO Qsch must, however, have a size limit (a buffer size) to prevent it from overflowing in case it is unable to dequeue packets as quickly as it receives them. DPVS implements two basic FIFO Qsch, one based on bytes, and one on packets. Regardless of the type of FIFO used, the size of the queue is defined by the parameter `limit`. For a pfifo the unit is understood to be packets and for a bfifo the unit is understood to be bytes.
+
+- **pfifo_fast**
+
+Based on a conventional FIFO Qsch, pfifo_fast Qsch also provides some prioritization. It provides three different bands (individual FIFOs) for separating traffic. The highest priority traffic (interactive flows) is placed into band 0 and is always serviced first. Similarly, band 1 is always emptied of pending packets before band 2 is dequeued. It's advised to use pfifo_fast as the root Qsch.
+
+![pfifo_fast qsch](pics/tc/pfifo_fast-qsch.png)
+
+There is nothing configurable to the end user about the pfifo_fast Qsch. For exact details on the priomap and use of the ToS bits, see the pfifo-fast section from [this doc](https://lartc.org/howto/lartc.qdisc.classless.html).
+
+- **tbf**
+
+`TBF` stands for "token bucket filter", because it is built on tokens and buckets. It simply shapes traffic transmitted on an interface. To limit the speed at which packets will be dequeued from a particular interface, the TBF Qsch is the perfect solution. It simply slows down transmitted traffic to the specified rate. Packets are only transmitted if there are sufficient tokens available. Otherwise, packets are deferred. Delaying packets in this fashion will introduce an artificial latency into the packet's round trip time. 
+
+![tbf qsch](pics/tc/tbf-qsch.png)
+
+TBF Qsch can have the following parameters.
+
+* rate: the speed knob, in bps (possibly prepended with an SI unit k, m, g), up to 4 Gbps.
+* burst: size of the bucket, in bytes. This is the maximum number of bytes that tokens can be available for instantaneously. In general, larger shaping rates require a larger burst value.
+* limit/latency: the number of bytes that can be queued waiting for tokens to become available/the maximum amount of time a packet can sit in the TBF, in bytes/milliseconds.
+* peakrate: optional, the maximum depletion rate of the bucket, in bps (possibly prepended with an SI unit k, m, g).
+* mtu: optional, size of the peakrate bucket, in bytes.
+
+<a id='cls'/>
+
+## Cls Objects
+
+- **match**
+
+Cls `match` consists of two components: a pattern and a target. Packets from the Qsch are compared with the pattern, and if matched, their fate is determined by the target.
+
+Pattern considers five attributes in a flow:
+
+* Protocol: TCP, UDP, ICMP.
+* Source ip-port range: The acceptable range of packets' source IP address and port.
+* Dest ip-port range: The acceptable range of packets' dest IP address and port.
+* Ingress device: The device the packet is received from, for ingress traffic only.
+* Egress device: The device the packet is sent to, for egress traffic only.
+
+One or more attributes of the pattern can be omitted. The omitted attributes are ignored when matching. For example, pattern `tcp,from=192.168.0.1:1-1024,oif=eth1` means to match TCP packets sent out on interface eth1 with a source IP address of 192.168.0.1 and a source port between 1 and 1024.
+
+There are two kinds of Cls target: Qsch or Drop. The former classifies matched packets into the queue of the specified Qsch, and the latter simply discards matched packets. The target Qsch must be a child of the Qsch the Cls is attached to.
+
+<a id='usage'/>
+
+# Steps to use DPVS TC
+
+**1. 
Turn on TC switch of the device.** + +```bash +dpip link set dpdk0 tc-egress on # enable tc-egress for dpdk0 +dpip link set dpdk0 tc-ingress on # enable tc-ingress for dpdk0 +``` +You can verify if TC for dpdk0 is enabled by checking if "tc-egress" or "tc-ingress" flag exists in the output of the command `dpip link show dpdk0`. + +> It's safe to enable or disable TC of a device anytime you like, even if when TC is processing packets. + +**2. Add a root Qsch object.** + +```bash +dpip qsch add dev dpdk0 root pfifo_fast # use pfifo_fast Qsch as tc-egress root +dpip qsch add dev dpdk0 ingress pfifo limit 65536 # use pfifo Qsch as tc-ingress root +``` + +**3. Add other Qsch objects as needed.** + +```bash +dpip qsch add dev dpdk0 handle 1: parent root tbf rate 10m burst 10000 latency 2ms # add a tbf for egress +dpip qsch add dev dpdk0 handle 2: parent ingress bfifo limit 100000 # add a bfifo for ingress +``` + +**4. Add Cls objects as needed.** + +```bash +dpip cls add dev dpdk0 qsch root match pattern 'tcp,oif=dpdk0' target 1: +dpip cls add dev dpdk0 qsch ingress match pattern 'icmp,iif=dpdk0' target 2: +``` + +**5. 
Check configurations and statistics.** + +``` +# Check Qsch on dpdk0 +[root@dpvs-test]# dpip qsch show dev dpdk0 +qsch pfifo_fast root dev dpdk0 parent 0: flags 0x0 cls 1 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 +qsch pfifo ingress dev dpdk0 parent 0: flags 0x1 cls 1 limit 65536 +qsch bfifo 2: dev dpdk0 parent ingress flags 0x1 cls 0 limit 100000 +qsch tbf 1: dev dpdk0 parent root flags 0x0 cls 0 rate 10.00Mbps burst 10000B limit 2500B + +# Check Cls on Qsch root +[root@dpvs-test]# dpip cls show dev dpdk0 qsch root +cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: + +# Check Cls on Qsch ingress +[root@dpvs-test]# dpip cls show dev dpdk0 qsch ingress +cls match 8003: dev dpdk0 qsch ingress pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: + +# Get statistics of Qsch 2:0 +[root@dpvs-test]# dpip qsch -s show dev dpdk0 handle 2: +qsch bfifo 2: dev dpdk0 parent ingress flags 0x1 cls 0 limit 100000 + Sent 17308172 bytes 176614 pkts (dropped 0, overlimits 0 requeues 0) + Backlog 0 bytes 0 pkts + +# Get per-lcore config of Cls 8001: +[root@dpvs-test]# dpip cls -v show dev dpdk0 qsch root handle 8001: +[00] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: +[01] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: +[02] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: +[03] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: +[04] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: +``` + + + +# Examples + + + +## Example 1. Device traffic shaping (Egress) + +In this example, we want to restrict the egress traffic bandwidth of NIC dpdk0 +to be within 2 Gbps. A root TBF Qsch can be applied to device dpdk0 to achieve +the goal, shown as the diagram below. 
+ +``` +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- + root (tbf: rate 2Gbps) +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- +``` + +Firstly, we enable tc-egress and setup a root tbf Qsch on dpdk0. + +```bash +dpip link set dpdk0 tc-egress on +dpip qsch add dev dpdk0 root tbf rate 2g burst 2000000 latency 1ms +``` + +Then we construct a test service 192.168.88.1:80, which forwards client's HTTP requests to a backend nginx web server 192.168.88.215. + +```bash +[root@dpvs-test]# ipvsadm -ln +IP Virtual Server version 0.0.0 (size=0) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP 192.168.88.1:80 wlc + -> 192.168.88.215:80 FullNat 1 0 0 +``` + +Lastly, we generate some traffic using `iperf` to service 192.168.88.1:80 from client 192.168.88.115(actually the same machine with the backend). It's noted that `iperf` reports a bandwidth of 1.93 Gbits/sec when the test is done. + +``` +[root@192.168.88.115]# iperf -t 10 -c 192.168.88.1 -p 80 +------------------------------------------------------------ +Client connecting to 192.168.88.1, TCP port 80 +TCP window size: 230 KByte (default) +------------------------------------------------------------ +[ 3] local 192.168.88.115 port 56296 connected with 192.168.88.1 port 80 +[ ID] Interval Transfer Bandwidth +[ 3] 0.0-10.0 sec 2.24 GBytes 1.93 Gbits/sec +``` + +Meanwhile, we should watch the egress traffic on device `dpdk0` using command `dpip link show dpdk0 -s -i 1 -C`. It's expected that the obytes of dpdk0 is about 2Gbps or 250 MBps. 
+ +``` +[root@dpvs-test]# dpip link show dpdk0 -s -C -i 1 +1: dpdk0: socket 0 mtu 1500 rx-queue 4 tx-queue 4 + UP 10000 Mbps full-duplex auto-nego tc-egress + addr A0:36:9F:74:EC:F0 OF_RX_IP_CSUM OF_TX_IP_CSUM OF_TX_TCP_CSUM OF_TX_UDP_CSUM + ipackets/pps opackets/pps ibytes/Bps obytes/Bps + 176156 176138 250339018 250310404 + ierrors/pps oerrors/pps imissed/pps rx_nombuf/pps + 0 0 0 0 + ipackets/pps opackets/pps ibytes/Bps obytes/Bps + 176185 176030 250288686 250059594 + ierrors/pps oerrors/pps imissed/pps rx_nombuf/pps + 0 0 0 0 +^C +``` + +We can adjust the bandwidth from 2 Gbps to 800 Mbps with the following command. + +```bash +dpip qsch change dev dpdk0 root tbf rate 800m burst 800000 latency 1ms +``` + +Then do the test again and the results are shown below. Obviously, `iperf` reports a bandwidth of 771 Mbits/sec, and DPVS dpdk0 obytes bandwidth also decreases to about 100 MBps, i.e. 800 Mbps. + +``` +[root@client]# iperf -t 10 -c 192.168.88.1 -p 80 +------------------------------------------------------------ +Client connecting to 192.168.88.1, TCP port 80 +TCP window size: 1.08 MByte (default) +------------------------------------------------------------ +[ 3] local 192.168.88.115 port 56351 connected with 192.168.88.1 port 80 +[ ID] Interval Transfer Bandwidth +[ 3] 0.0-10.0 sec 919 MBytes 771 Mbits/sec + +[root@dpvs-test]# dpip link show dpdk0 -s -C -i 1 +1: dpdk0: socket 0 mtu 1500 rx-queue 4 tx-queue 4 + UP 10000 Mbps full-duplex auto-nego tc-egress + addr A0:36:9F:74:EC:F0 OF_RX_IP_CSUM OF_TX_IP_CSUM OF_TX_TCP_CSUM OF_TX_UDP_CSUM + ipackets/pps opackets/pps ibytes/Bps obytes/Bps + 70988 70618 100573260 100024836 + ierrors/pps oerrors/pps imissed/pps rx_nombuf/pps + 0 0 0 0 + ipackets/pps opackets/pps ibytes/Bps obytes/Bps + 71011 70743 100530072 100025166 + ierrors/pps oerrors/pps imissed/pps rx_nombuf/pps + 0 0 0 0 +^C +``` + + + +## Example 2. Traffic classification and flow control (Egress) + +This example shows how to classify traffic with DPVS TC. 
Suppose that our server needs to visit three services: a ssh service(tcp:22), a web service(tcp:80), and a udp service. We want to restrict the bandwidth of web service within 800 Mbps, and udp service within 80 Mbps with a tolerance of transient peak traffic of 100 Mbps. Don't restrict the bandwidth of ssh service. + +The diagram below shows our TC scheme for this case. Traffic is classified to TCP and UDP, and then TCP traffic is classified to ssh(:22) and web(:80). + +``` +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- + qsch root + (pfifo_fast) + | + | + ------------------- + | | + cls 0:1| |cls 0:2 + (tcp) | | (udp) + | | + qsch 1: qsch 2: + (bfifo) (tbf: rate 80Mbps, peak 100Mbps) + | + ------------------ + | | + cls 1:1 | |cls 1:2 + (tcp:22,ssh) | |(tcp:80,web) + | | + qsch 10: qsch 20: + (pfifo) (tbf: rate 800Mbps) +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- +``` + +Firstly, we setup the Qsch and Cls objects. + +```bash +dpip link set dpdk0 tc-egress on # enable tc-egress of dpdk0 +dpip qsch add dev dpdk0 root pfifo_fast # qsch root +dpip qsch add dev dpdk0 handle 1:0 parent root bfifo limit 10000000 # qsch tcp (!:80,!:22) +dpip qsch add dev dpdk0 handle 2:0 parent root tbf rate 80m burst 80000 latency 2ms peakrate 100m mtu 20000 # qsch udp +dpip qsch add dev dpdk0 handle 10:0 parent 1:0 pfifo limit 100000 # qsch tcp:22,ssh +dpip qsch add dev dpdk0 handle 20:0 parent 1:0 tbf rate 800m burst 800000 latency 2ms # qsch tcp:80,web +dpip cls add dev dpdk0 qsch root handle 0:1 match pattern 'tcp,oif=dpdk0' target 1: # cls tcp +dpip cls add dev dpdk0 qsch root handle 0:2 match pattern 'udp,oif=dpdk0' target 2: # cls udp +dpip cls add dev dpdk0 qsch 1: handle 1:1 match pattern 'tcp,to=:22,oif=dpdk0' target 10: # cls tcp:22,ssh +dpip cls add dev dpdk0 qsch 1: handle 1:2 match pattern 'tcp,to=:80,oif=dpdk0' target 20: # cls tcp:80,web +``` + +If the setup is successful, you can get the Qsch and Cls 
configurations as below. + +``` +[root@dpvs-test]# dpip qsch show dev dpdk0 +qsch pfifo_fast root dev dpdk0 parent 0: flags 0x0 cls 2 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 +qsch tbf 20: dev dpdk0 parent 1: flags 0x0 cls 0 rate 800.00Mbps burst 800000B limit 200000B +qsch pfifo 10: dev dpdk0 parent 1: flags 0x0 cls 0 limit 100000 +qsch tbf 2: dev dpdk0 parent root flags 0x0 cls 0 rate 80.00Mbps burst 80000B peakrate 100.00Mbps minburst 20000B limit 20000B +qsch bfifo 1: dev dpdk0 parent root flags 0x0 cls 2 limit 10000000 +[root@dpvs-test]# dpip cls show dev dpdk0 qsch root +cls match 0:1 dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +cls match 0:2 dev dpdk0 qsch root pkttype 0x0800 prio 0 UDP,oif=dpdk0 target 2: +[root@dpvs-test]# dpip cls show dev dpdk0 qsch 1: +cls match 1:1 dev dpdk0 qsch 1: pkttype 0x0800 prio 0 TCP,to=0.0.0.0-0.0.0.0:22-22,oif=dpdk0 target 10: +cls match 1:2 dev dpdk0 qsch 1: pkttype 0x0800 prio 0 TCP,to=0.0.0.0-0.0.0.0:80-80,oif=dpdk0 target 20: +[root@dpvs-test]# dpip cls show dev dpdk0 qsch 2: +[root@dpvs-test]# +``` + +Next, we construct three services for our test. DPVS forwards traffic from client (192.168.88.115) to backend server (192.168.88.215, actually the same machine as client) with FNAT forwarding mode. Both inbound and outbound traffic routes are shown as below. Note that the tc scheme of this test works at the inbound route of "DPVS Service -> Backend Server". 
+
+ Inbound: Client(192.168.88.115) -> DPVS Service(192.168.88.[1-3]) -> Backend Server(192.168.88.215)
+ Outbound: Client(192.168.88.115) <- DPVS Service(192.168.88.[1-3]) <- Backend Server(192.168.88.215)
+
+```bash
+dpip addr add 192.168.88.12/24 dev dpdk0
+dpip addr add 192.168.88.1/32 dev dpdk0
+dpip addr add 192.168.88.2/32 dev dpdk0
+dpip addr add 192.168.88.3/32 dev dpdk0
+ipvsadm -A -t 192.168.88.1:80
+ipvsadm -at 192.168.88.1:80 -r 192.168.88.215:80 -b
+ipvsadm -Pt 192.168.88.1:80 -z 192.168.88.241 -F dpdk0
+ipvsadm -A -t 192.168.88.2:22
+ipvsadm -at 192.168.88.2:22 -r 192.168.88.215:22 -b
+ipvsadm -Pt 192.168.88.2:22 -z 192.168.88.241 -F dpdk0
+ipvsadm -A -u 192.168.88.3:6000
+ipvsadm -au 192.168.88.3:6000 -r 192.168.88.215:6000 -b
+ipvsadm -Pu 192.168.88.3:6000 -z 192.168.88.241 -F dpdk0
+```
+
+Check the services we have constructed, and make sure the backend services are up.
+
+```
+[root@dpvs-test]# ipvsadm -ln
+IP Virtual Server version 0.0.0 (size=0)
+Prot LocalAddress:Port Scheduler Flags
+ -> RemoteAddress:Port Forward Weight ActiveConn InActConn
+TCP 192.168.88.1:80 wlc
+ -> 192.168.88.215:80 FullNat 1 0 0
+TCP 192.168.88.2:22 wlc
+ -> 192.168.88.215:22 FullNat 1 0 0
+UDP 192.168.88.3:6000 wlc
+ -> 192.168.88.215:6000 FullNat 1 0 0
+```
+
+Then we start our tests to check whether our TC strategies work.
+
+**Test 1. Ssh traffic should be routed to Qsch 10:0.**
+
+Try the following command from Client several times,
+
+```bash
+ssh root@192.168.88.2
+```
+
+and watch the statistics of Qsch 10:0 from DPVS. Hopefully, we can see an increase of the statistics.
+
+```
+Every 2.0s: dpip qsch show dev dpdk0 handle 10: -s
+
+qsch pfifo 10: dev dpdk0 parent 1: flags 0x0 cls 0 limit 100000
+ Sent 10508 bytes 142 pkts (dropped 0, overlimits 0 requeues 0)
+ Backlog 0 bytes 0 pkts
+```
+
+> The `ssh` command may fail depending on the setting of the ssh service's config. But it doesn't matter for the test.
+
+**Test 2. 
Web traffic bandwidth should be bounded with 800 Mbps.** + +We generate web traffic with `iperf` tool, + +``` +[root@client]# iperf -t 30 -c 192.168.88.1 -p 80 +------------------------------------------------------------ +Client connecting to 192.168.88.1, TCP port 80 +TCP window size: 158 KByte (default) +------------------------------------------------------------ +[ 3] local 192.168.88.115 port 5578 connected with 192.168.88.1 port 80 +write failed: Broken pipe +[ ID] Interval Transfer Bandwidth +[ 3] 0.0-29.8 sec 2.66 GBytes 767 Mbits/sec +``` + +and in the meanwhile, watch outbound traffic of device `dpdk0` + +``` +[root@dpvs-test]# dpip link show dpdk0 -i 1 -C -s +1: dpdk0: socket 0 mtu 1500 rx-queue 4 tx-queue 4 + UP 10000 Mbps full-duplex auto-nego tc-egress + addr A0:36:9F:74:EC:F0 OF_RX_IP_CSUM OF_TX_IP_CSUM OF_TX_TCP_CSUM OF_TX_UDP_CSUM + ipackets/pps opackets/pps ibytes/Bps obytes/Bps + 69922 69885 100264510 100210234 + ierrors/pps oerrors/pps imissed/pps rx_nombuf/pps + 0 0 0 0 + ipackets/pps opackets/pps ibytes/Bps obytes/Bps + 69937 69898 100368080 100307780 + ierrors/pps oerrors/pps imissed/pps rx_nombuf/pps + 0 0 0 0 +^C +``` + +As expected, the `obytes` of `dpdk0` is limited to about 100 MBps(800 Mbps). + +**Test 3. UDP traffic bandwidth should be limited to 80 Mbps.** + +UDP protocol doesn't enforce any traffic control mechanism. In this test, we generate 400 Mbps steady UDP traffic(more than the expected 80 Mbps) with `iperf` tool, and see how much is forwarded to backend. 
+ +``` +[root@client]# iperf -u -t 30 -b 400m -c 192.168.88.3 -p 6000 +------------------------------------------------------------ +Client connecting to 192.168.88.3, UDP port 6000 +Sending 1470 byte datagrams, IPG target: 29.40 us (kalman adjust) +UDP buffer size: 8.00 MByte (default) +------------------------------------------------------------ +[ 3] local 192.168.88.115 port 3192 connected with 192.168.88.3 port 6000 +[ 3] WARNING: did not receive ack of last datagram after 10 tries. +[ ID] Interval Transfer Bandwidth +[ 3] 0.0-30.0 sec 1.40 GBytes 400 Mbits/sec +[ 3] Sent 1020410 datagrams +``` + +In the meanwhile, watch the outbound traffic of device `dpdk0`. + +``` +[root@dpvs-test]# dpip link show dpdk0 -i 1 -C -s +1: dpdk0: socket 0 mtu 1500 rx-queue 4 tx-queue 4 + UP 10000 Mbps full-duplex auto-nego tc-egress + addr A0:36:9F:74:EC:F0 OF_RX_IP_CSUM OF_TX_IP_CSUM OF_TX_TCP_CSUM OF_TX_UDP_CSUM + ipackets/pps opackets/pps ibytes/Bps obytes/Bps + 40640 6614 61444776 10000368 + ierrors/pps oerrors/pps imissed/pps rx_nombuf/pps + 0 0 0 0 + ipackets/pps opackets/pps ibytes/Bps obytes/Bps + 40642 6614 61444896 10000368 + ierrors/pps oerrors/pps imissed/pps rx_nombuf/pps + 0 0 0 0 +^C +``` + +As expected, the `obytes` of `dpdk0` is limited to about 10 MBps(80 Mbps). Note that the inbound traffic on `dpdk0` is about 480 Mbps, far more larger than the 80 Mbps outbound traffic. + +Finally, we check statistics of all Qsch and DPVS services. 
+ +``` +[root@dpvs-test]# ipvsadm -ln --stats +IP Virtual Server version 0.0.0 (size=0) +Prot LocalAddress:Port Conns InPkts OutPkts InBytes OutBytes + -> RemoteAddress:Port +TCP 192.168.88.1:80 1 1969674 103645 2938M 4218283 + -> 192.168.88.215:80 1 1969674 103645 2938M 4218283 +TCP 192.168.88.2:22 142 142 142 7384 5680 + -> 192.168.88.215:22 142 142 142 7384 5680 +UDP 192.168.88.3:6000 1 1020420 0 1528M 0 + -> 192.168.88.215:6000 1 1020420 0 1528M 0 +[root@dpvs-test]# dpip qsch show dev dpdk0 -s +qsch pfifo_fast root dev dpdk0 parent 0: flags 0x0 cls 2 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 + Sent 762 bytes 13 pkts (dropped 0, overlimits 0 requeues 0) + Backlog 0 bytes 0 pkts +qsch tbf 20: dev dpdk0 parent 1: flags 0x0 cls 0 rate 800.00Mbps burst 800000B limit 200000B + Sent 2964690786 bytes 1968589 pkts (dropped 1085, overlimits 72327167 requeues 0) + Backlog 0 bytes 0 pkts +qsch pfifo 10: dev dpdk0 parent 1: flags 0x0 cls 0 limit 100000 + Sent 10508 bytes 142 pkts (dropped 0, overlimits 0 requeues 0) + Backlog 0 bytes 0 pkts +qsch tbf 2: dev dpdk0 parent root flags 0x0 cls 0 rate 80.00Mbps burst 80000B peakrate 100.00Mbps minburst 20000B limit 20000B + Sent 300110832 bytes 198486 pkts (dropped 821934, overlimits 74624250 requeues 0) + Backlog 0 bytes 0 pkts +qsch bfifo 1: dev dpdk0 parent root flags 0x0 cls 2 limit 10000000 + Sent 5676981 bytes 103787 pkts (dropped 0, overlimits 0 requeues 0) + Backlog 0 bytes 0 pkts +``` + +> Note that some packets are classified into Qsch 1:0. Can you figure out why? + + + +## Example 3. Access control with TC (Ingress) + +This example implements a simple ACL rule with DPVS TC. Suppose all clients other than 2001::/120 are allowed to access an IPv6 service configured on DPVS. TC can help solve the problem by constructing a ACL rule (i.e. a blacklist) with an ingress root Qsch and a Cls. 
+ +``` +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- + qsch ingress + (pfifo_fast) + | + | + ----------------------------- + | | + cls 0:1 | | any other + src: 2001::/120| | + | | + Drop Accept +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- +``` + +Firstly, setup the Qsch and Cls objects. + +```bash +dpip qsch add dev dpdk0 ingress pfifo_fast +dpip cls add dev dpdk0 pkttype ipv6 qsch ingress handle 0:1 match pattern 'icmp6,from=2001::0-2001::ff,iif=dpdk0' target drop +dpip link set dpdk0 tc-ingress on +``` + +We only set ICMP Cls just for conveniences. Then add a IPv6 address on DPVS, + +```bash +dpip addr add 2001::112/64 dev dpdk0 +``` + +and add two IPv6 addresses on client. + +```bash +ip addr add 2001::15/64 dev eth0 +ip addr add 2001::1:15/64 dev eth0 +``` + +Check what we have configured. + +``` +[root@dpvs-test]# dpip qsch show dev dpdk0 +qsch pfifo_fast ingress dev dpdk0 parent 0: flags 0x1 cls 1 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 +[root@dpvs-test]# dpip cls show dev dpdk0 qsch ingress +cls match 0:1 dev dpdk0 qsch ingress pkttype 0x86dd prio 0 ICMPV6,from=[2001::-2001::ff]:0-0,iif=dpdk0 target drop +[root@dpvs-test]# dpip addr show +inet6 2001::112/64 scope global dpdk0 + valid_lft forever preferred_lft forever +``` + +Now try to ping 2001::112 from client using different source IP addresses. 
+ +``` +[root@client]# ping6 -c 3 2001::112 -m 1 -I 2001::15 +PING 2001::112(2001::112) from 2001::15 : 56 data bytes +From 2001::15 icmp_seq=1 Destination unreachable: Address unreachable +From 2001::15 icmp_seq=2 Destination unreachable: Address unreachable +From 2001::15 icmp_seq=3 Destination unreachable: Address unreachable + +--- 2001::112 ping statistics --- +3 packets transmitted, 0 received, +3 errors, 100% packet loss, time 1999ms + +[root@client]# ping6 -c 3 2001::112 -m 1 -I 2001::1:15 +PING 2001::112(2001::112) from 2001::1:15 : 56 data bytes +64 bytes from 2001::112: icmp_seq=1 ttl=64 time=0.108 ms +64 bytes from 2001::112: icmp_seq=2 ttl=64 time=0.041 ms +64 bytes from 2001::112: icmp_seq=3 ttl=64 time=0.051 ms + +--- 2001::112 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 1999ms +rtt min/avg/max/mdev = 0.041/0.066/0.108/0.030 ms +``` + +As expected, ping DPVS IPv6 address 2001::112 failed from 2001::15, and succeeded from 2001::1:15. + +Finally, disable `tc-ingress` for `dpdk0`, and ping from 2001::15, and succeed this time. + +``` +[root@dpvs-test]# dpip link set dpdk0 tc-ingress off + +[root@client ~]# ping6 -c 3 2001::112 -m 1 -I 2001::15 +PING 2001::112(2001::112) from 2001::15 : 56 data bytes +64 bytes from 2001::112: icmp_seq=1 ttl=64 time=0.178 ms +64 bytes from 2001::112: icmp_seq=2 ttl=64 time=0.054 ms +64 bytes from 2001::112: icmp_seq=3 ttl=64 time=0.038 ms + +--- 2001::112 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 1999ms +rtt min/avg/max/mdev = 0.038/0.090/0.178/0.062 ms +``` + + + +## Example 4. Traffic policing for services (Ingress) + +In this case, we have two TCP FNAT sersvices: 192.168.88.30:80 (Service A) and [2001::30]:8080 (Service B), each expecting a bandwidth limit of 200 Mbps. Besides, service A restricts access from 192.168.88.0/28 with a bandwidth limit of 800 Kbps and 1 Mbps transient peak traffic. 
Service B forbids access from clients in the IP address range 2001::/120. + +The tc scheme is shown as the diagram below. + +``` +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- + qsch ingress + (pfifo_fast) + | + | + ----------------------------------------------- + | | + |cls 0:1 |cls 0:2 + |to service: |to service: + | 192.168.88.30 tcp 80 | 2001::30 tcp 8080 + | | + qsch 1: qsch 2: + (tbf: rate 200Mbps) (tbf: rate 200Mbps) + | | + |cls 1:1 |cls 2:1 + |from: 192.168.88.0/28 |from: 2001::/120 + | | + qsch 10: Drop + (tbf: rate 800Kbps, peak 1Mbps) +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- +``` + +Firstly, we create Qsch and Cls objects described in the diagram. + +```bash +dpip link set dpdk0 tc-ingress on # enable tc-ingress of dpdk0 +dpip qsch add dev dpdk0 ingress pfifo_fast # ingress root Qsch +dpip qsch add dev dpdk0 handle 1: parent ingress tbf rate 200m burst 200000 latency 2ms # Qsch of service A +dpip qsch add dev dpdk0 handle 2: parent ingress tbf rate 200m burst 200000 latency 2ms # Qsch of service B +dpip qsch add dev dpdk0 handle 10: parent 1: tbf rate 800k burst 4000 peakrate 1m mtu 2000 limit 8000 # Qsch of service A for specific users +dpip cls add dev dpdk0 pkttype ipv4 handle 0:1 qsch ingress match pattern 'tcp,to=192.168.88.30:80,iif=dpdk0' target 1: # Cls 0:1 +dpip cls add dev dpdk0 pkttype ipv6 handle 0:2 qsch ingress match pattern 'tcp,to=[2001::30]:8080,iif=dpdk0' target 2: # Cls 0:2 +dpip cls add dev dpdk0 pkttype ipv4 handle 1:1 qsch 1: match pattern 'tcp,from=192.168.88.0-192.168.88.15,iif=dpdk0' target 10: # Cls 1:1 +dpip cls add dev dpdk0 pkttype ipv6 handle 2:1 qsch 2: match pattern 'tcp,from=2001::-2001::ff,iif=dpdk0' target drop # Cls 2:1 +``` + +Then we setup the two serivces. 
+ +```bash +dpip addr add 192.168.88.30/24 dev dpdk0 +ipvsadm -A -t 192.168.88.30:80 +ipvsadm -at 192.168.88.30:80 -r 192.168.88.215:80 -b +ipvsadm -Pt 192.168.88.30:80 -z 192.168.88.241 -F dpdk0 +dpip addr add 2001::30/64 dev dpdk0 +ipvsadm -A -t [2001::30]:8080 +ipvsadm -at [2001::30]:8080 -r 192.168.88.215:80 -b +ipvsadm -Pt [2001::30]:8080 -z 192.168.88.241 -F dpdk0 +``` + +Check the tc objects and services we have configured just now. + +``` +[root@dpvs-test]# dpip qsch show dev dpdk0 +qsch pfifo_fast ingress dev dpdk0 parent 0: flags 0x1 cls 2 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 +qsch tbf 10: dev dpdk0 parent 1: flags 0x1 cls 0 rate 800.00Kbps burst 4000B peakrate 1.00Mbps minburst 2000B limit 8000B +qsch tbf 2: dev dpdk0 parent ingress flags 0x1 cls 1 rate 200.00Mbps burst 200000B limit 50000B +qsch tbf 1: dev dpdk0 parent ingress flags 0x1 cls 1 rate 200.00Mbps burst 200000B limit 50000B +[root@dpvs-test]# dpip cls show dev dpdk0 qsch ingress +cls match 0:1 dev dpdk0 qsch ingress pkttype 0x0800 prio 0 TCP,to=192.168.88.30-192.168.88.30:80-80,iif=dpdk0 target 1: +cls match 0:2 dev dpdk0 qsch ingress pkttype 0x86dd prio 0 TCP,to=[2001::30-2001::30]:8080-8080,iif=dpdk0 target 2: +[root@dpvs-test]# dpip cls show dev dpdk0 qsch 1: +cls match 1:1 dev dpdk0 qsch 1: pkttype 0x0800 prio 0 TCP,from=192.168.88.0-192.168.88.15:0-0,iif=dpdk0 target 10: +[root@dpvs-test]# dpip cls show dev dpdk0 qsch 2: +cls match 2:1 dev dpdk0 qsch 2: pkttype 0x86dd prio 0 TCP,from=[2001::-2001::ff]:0-0,iif=dpdk0 target drop +[root@dpvs-test]# dpip addr show dev dpdk0 +inet6 2001::30/64 scope global dpdk0 + valid_lft forever preferred_lft forever +inet 192.168.88.241/32 scope global dpdk0 + valid_lft forever preferred_lft forever +inet 192.168.88.30/24 scope global dpdk0 + valid_lft forever preferred_lft forever +[root@dpvs-test]# ipvsadm -ln +IP Virtual Server version 0.0.0 (size=0) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn 
InActConn +TCP 192.168.88.30:80 wlc + -> 192.168.88.215:80 FullNat 1 0 0 +TCP [2001::30]:8080 wlc + -> 192.168.88.215:80 FullNat 1 0 0 +``` + +Next, we configure four client IP addresses on the Client to simulate four users. + +```bash +# Note: please run on Client. +ip addr add 192.168.88.15/24 dev eth0 +ip addr add 192.168.88.115/24 dev eth0 +ip addr add 2001::15/64 dev eth0 +ip addr add 2001::1:15/64 dev eth0 +``` + +Now let's begin the tests. + +**Test 1. Requests from 192.168.88.115 to service A should be classified into Qsch 1:0 with a bandwidth limit of 200 Mbps.** + +We should change route to network 192.168.88.0/24 on Client to use 192.168.88.115 as the source IP address before this test. + +``` +[root@client]# ip route change 192.168.88.0/24 dev eth0 src 192.168.88.115 +[root@client]# ip route show 192.168.88.0/24 dev eth0 +192.168.88.0/24 scope link src 192.168.88.115 +``` + +Then send traffic to service A with `iperf` tools, + +``` +[root@client]# iperf -t 10 -c 192.168.88.30 -p 80 +------------------------------------------------------------ +Client connecting to 192.168.88.30, TCP port 80 +TCP window size: 207 KByte (default) +------------------------------------------------------------ +[ 3] local 192.168.88.115 port 45544 connected with 192.168.88.30 port 80 +[ ID] Interval Transfer Bandwidth +[ 3] 0.0-10.0 sec 230 MBytes 193 Mbits/sec +``` + +and in the meanwhile watch the inbound traffic of `dpdk0`. 
+
+```
+[root@dpvs-test]# dpip link show dpdk0 -i 1 -C -s
+1: dpdk0: socket 0 mtu 1500 rx-queue 4 tx-queue 4
+    UP 10000 Mbps full-duplex auto-nego tc-ingress
+    addr A0:36:9F:74:EC:F0 OF_RX_IP_CSUM OF_TX_IP_CSUM OF_TX_TCP_CSUM OF_TX_UDP_CSUM
+    ipackets/pps            opackets/pps            ibytes/Bps              obytes/Bps
+    18929                   18825                   25285652                25134812
+    ierrors/pps             oerrors/pps             imissed/pps             rx_nombuf/pps
+    0                       0                       0                       0
+    ipackets/pps            opackets/pps            ibytes/Bps              obytes/Bps
+    18849                   18764                   25256206                25131088
+    ierrors/pps             oerrors/pps             imissed/pps             rx_nombuf/pps
+    0                       0                       0                       0
+^C
+```
+
+As we expect, the results show that both inbound and outbound traffic on `dpdk0` are about 25 MBps (200 Mbps), and `iperf` reports a bandwidth of 193 Mbps after the test.
+
+> Notes: The recipient cannot decide how much traffic flows into it, and `iperf` could have sent far more traffic than 200 Mbps to our service. However, due to the congestion control mechanism of the TCP protocol, the inbound traffic of dpdk0 is limited to 200 Mbps.
+
+At last, let's have a look at the statistics of Qsch 1:0 to confirm that the test traffic was processed by it just now.
+
+```
+[root@dpvs-test]# dpip qsch show -s dev dpdk0 handle 1:
+qsch tbf 1: dev dpdk0 parent ingress flags 0x1 cls 1 rate 200.00Mbps burst 200000B limit 50000B
+    Sent 949593386 bytes 701852 pkts (dropped 4368, overlimits 108243831 requeues 0)
+    Backlog 0 bytes 0 pkts
+```
+
+**Test 2. Requests from 192.168.88.15 to service A should be classified into Qsch 10:0 with a bandwidth limit of 800 Kbps.**
+
+Change the route to network 192.168.88.0/24 on Client to use 192.168.88.15 as the source IP address and redo the previous test.
+
+```
+[root@client]# ip route change 192.168.88.0/24 dev eth0 src 192.168.88.15
+[root@client]# ip route show 192.168.88.0/24 dev eth0
+192.168.88.0/24 scope link src 192.168.88.15
+[root@client]# iperf -t 10 -c 192.168.88.30 -p 80
+------------------------------------------------------------
+Client connecting to 192.168.88.30, TCP port 80
+TCP window size: 64.0 KByte (default)
+------------------------------------------------------------
+[ 3] local 192.168.88.15 port 24095 connected with 192.168.88.30 port 80
+[ ID] Interval       Transfer     Bandwidth
+[ 3]  0.0-10.1 sec   1.00 MBytes  830 Kbits/sec
+
+[root@dpvs-test]# dpip link show dpdk0 -i 1 -C -s
+1: dpdk0: socket 0 mtu 1500 rx-queue 4 tx-queue 4
+    UP 10000 Mbps full-duplex auto-nego tc-ingress
+    addr A0:36:9F:74:EC:F0 OF_RX_IP_CSUM OF_TX_IP_CSUM OF_TX_TCP_CSUM OF_TX_UDP_CSUM
+    ipackets/pps            opackets/pps            ibytes/Bps              obytes/Bps
+    138                     132                     109668                  103524
+    ierrors/pps             oerrors/pps             imissed/pps             rx_nombuf/pps
+    0                       0                       0                       0
+    ipackets/pps            opackets/pps            ibytes/Bps              obytes/Bps
+    138                     134                     109626                  105048
+    ierrors/pps             oerrors/pps             imissed/pps             rx_nombuf/pps
+    0                       0                       0                       0
+^C
+```
+
+As we expect, the traffic bandwidth reported by both `dpdk0` statistics and `iperf` command outputs is decreased to about 800 Kbps, the actual bandwidth we preset.
+
+At last, let's have a look at the statistics of Qsch 10:0 to confirm that the traffic is processed by it.
+
+```
+[root@dpvs-test]# dpip qsch show -s dev dpdk0 handle 10:
+qsch tbf 10: dev dpdk0 parent 1: flags 0x1 cls 0 rate 800.00Kbps burst 4000B peakrate 1.00Mbps minburst 2000B limit 8000B
+    Sent 1021620 bytes 687 pkts (dropped 64, overlimits 28738652 requeues 0)
+    Backlog 0 bytes 0 pkts
+```
+
+**Test 3. Requests from 2001::1:15 to service B should be classified into Qsch 2:0 with a bandwidth limit of 200 Mbps.**
+
+Similar to `Test 1`, we should set the route for network 2001::/64 on Client to use 2001::1:15 as the source IP address.
+
+```
+[root@client]# ip -6 route change 2001::/64 dev eth0 proto kernel src 2001::1:15
+[root@client]# ip -6 route show 2001::/64
+2001::/64 dev eth0 proto kernel src 2001::1:15 metric 1024
+```
+
+Then generate traffic to service B and watch the statistics of `dpdk0` in the meantime.
+
+```
+[root@client]# iperf -V -t 10 -c 2001::30 -p 8080
+------------------------------------------------------------
+Client connecting to 2001::30, TCP port 8080
+TCP window size: 680 KByte (default)
+------------------------------------------------------------
+[ 3] local 2001::1:15 port 57871 connected with 2001::30 port 8080
+[ ID] Interval       Transfer     Bandwidth
+[ 3]  0.0-10.0 sec   228 MBytes   191 Mbits/sec
+
+[root@dpvs-test]# dpip link show dpdk0 -i 1 -C -s
+1: dpdk0: socket 0 mtu 1500 rx-queue 4 tx-queue 4
+    UP 10000 Mbps full-duplex auto-nego tc-ingress
+    addr A0:36:9F:74:EC:F0 OF_RX_IP_CSUM OF_TX_IP_CSUM OF_TX_TCP_CSUM OF_TX_UDP_CSUM
+    ipackets/pps            opackets/pps            ibytes/Bps              obytes/Bps
+    18729                   18641                   25279106                24853492
+    ierrors/pps             oerrors/pps             imissed/pps             rx_nombuf/pps
+    0                       0                       0                       0
+    ipackets/pps            opackets/pps            ibytes/Bps              obytes/Bps
+    18675                   18608                   25251290                24851260
+    ierrors/pps             oerrors/pps             imissed/pps             rx_nombuf/pps
+    0                       0                       0                       0
+^C
+```
+
+As the results show, the traffic bandwidth to service B is limited to 200 Mbps. Then check the statistics of Qsch 2:0 to confirm the test traffic has been processed by it.
+
+```
+[root@dpvs-test]# dpip qsch show -s dev dpdk0 handle 2:
+qsch tbf 2: dev dpdk0 parent ingress flags 0x1 cls 1 rate 200.00Mbps burst 200000B limit 50000B
+    Sent 250840912 bytes 165690 pkts (dropped 975, overlimits 28211449 requeues 0)
+    Backlog 0 bytes 0 pkts
+```
+
+**Test 4. Requests from 2001::15 to service B should be rejected.**
+
+Firstly, change the route for network 2001::/64 on Client to use 2001::15 as the source IP address.
+ +``` +[root@client]# ip -6 route change 2001::/64 dev eth0 proto kernel src 2001::15 +[root@client]# ip -6 route show 2001::/64 +2001::/64 dev eth0 proto kernel src 2001::15 metric 1024 +``` + +Then, try access service B with tool `curl`, + +``` +[root@client]# curl -m 2 -g [2001::30]:8080 +curl: (28) Connection timed out after 2001 milliseconds +``` +and get *failed* no surprisingly. Request from 2001::15 is rejected by service B. + +As a contrast, we turn off the `tc-ingress` switch of `dpdk0`, and redo the test. + +``` +[root@dpvs-test]# dpip link set dpdk0 tc-ingress off + +[root@client]# curl -m 2 -g [2001::30]:8080 +nginx 192.168.88.215 +``` + +As what we expect, request from 2001::15 is accepted by service B this time. diff --git a/doc/tutorial.md b/doc/tutorial.md index f1c5925f6..73ec78b68 100644 --- a/doc/tutorial.md +++ b/doc/tutorial.md @@ -21,6 +21,7 @@ DPVS Tutorial - [KNI for virtual device](#vdev-kni) * [UDP Option of Address (UOA)](#uoa) * [Launch DPVS in Virtual Machine (Ubuntu)](#Ubuntu16.04) +* [Traffic Control(TC)](#tc) * [Debug DPVS](#debug) - [Debug with Log](#debug-with-log) - [Packet Capture and Tcpdump](#packet-capture) @@ -1151,6 +1152,12 @@ worker_defs { ``` + + +# Traffic Control(TC) + +Please refer to doc [tc.md](tc.md). 
+ # Debug DPVS diff --git a/scripts/setup.tc.sample.sh b/scripts/setup.tc.sample.sh deleted file mode 100644 index 5f11f509a..000000000 --- a/scripts/setup.tc.sample.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/sh - -# raychen, Jan 2018 - -VIP="192.168.204.252" -DIP="192.168.204.50" -RSs="192.168.204.51 192.168.204.52 192.168.204.53 192.168.204.54" -LIPs="192.168.204.200 192.168.204.201 192.168.204.202 192.168.204.203 192.168.204.204 192.168.204.205" -LANIF=dpdk0 - -# 0: root -# oif=lan / -# 1:0 (10m) -# / \ -# (2000p) 1:1 1:2 (4g) - -# 10m rate limit with tbf -./dpip qsch add dev ${LANIF} handle 1:0 parent 0: tbf rate 10m burst 1500000 latency 20 -# 2000 pfifo limit -./dpip qsch add dev ${LANIF} handle 1:1 parent 1: pfifo limit 2000 -# 4g rate limit with tbf -./dpip qsch add dev ${LANIF} handle 1:2 parent 1: tbf rate 4g burst 150000000 latency 20 - -# check at 0:, goto 1: if packet sent by lan interface -./dpip cls add dev ${LANIF} qsch 0: match pattern "tcp,oif=${LANIF}" target 1: -# check at 1:, goto 1:1 if packet is tcp,to=0.0.0.0:80 -./dpip cls add dev ${LANIF} qsch 1: match pattern 'tcp,to=0.0.0.0:80' target 1:1 -# check at 1:, goto 1:2 if packet is tcp,from=0.0.0.0:80 -./dpip cls add dev ${LANIF} qsch 1: match pattern 'tcp,from=0.0.0.0:80' target 1:2 - -./dpip addr add $DIP/24 dev ${LANIF} -./dpip addr add $VIP/24 dev ${LANIF} -./ipvsadm -A -t $VIP:80 -s rr - -for rs in $RSs; do - ./ipvsadm -a -t $VIP:80 -r $rs -b -done - -for lip in $LIPs; do - ./ipvsadm --add-laddr -z $lip -t $VIP:80 -F ${LANIF} -done From a7eac978dcbafeb1b7358e14e4009db107ce202a Mon Sep 17 00:00:00 2001 From: ywc689 Date: Fri, 19 Mar 2021 10:29:38 +0800 Subject: [PATCH 28/35] tc: fix compile problem in debug and old dpdk versions reported by ci --- src/netif.c | 2 +- src/tc/cls_match.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/netif.c b/src/netif.c index 24514855e..b80c1b106 100644 --- a/src/netif.c +++ b/src/netif.c @@ -2259,7 +2259,7 @@ static int 
netif_deliver_mbuf(struct netif_port *dev, lcoreid_t cid, */ if (dev->flag & NETIF_PORT_FLAG_FORWARD2KNI) { struct rte_mbuf *mbuf_copied = mbuf_copy(mbuf, pktmbuf_pool[dev->socket]); - if (likely(mbuf_copied)) + if (likely(mbuf_copied != NULL)) kni_ingress(mbuf_copied, dev); else RTE_LOG(WARNING, NETIF, "%s: failed to copy mbuf for kni\n", __func__); diff --git a/src/tc/cls_match.c b/src/tc/cls_match.c index f9e67d0b6..ebbdbbae8 100644 --- a/src/tc/cls_match.c +++ b/src/tc/cls_match.c @@ -204,8 +204,8 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, char cls_id[16], qsch_id[16]; if (ip6h) { - inet_ntop(AF_INET6, &ip6h->saddr, sip, sizeof(sip)); - inet_ntop(AF_INET6, &ip6h->daddr, dip, sizeof(dip)); + inet_ntop(AF_INET6, &ip6h->ip6_src, sip, sizeof(sip)); + inet_ntop(AF_INET6, &ip6h->ip6_dst, dip, sizeof(dip)); } else { inet_ntop(AF_INET, &iph->saddr, sip, sizeof(sip)); inet_ntop(AF_INET, &iph->daddr, dip, sizeof(dip)); From f8e72ed54d3a2911a084a0c5089251e6e9b9f6c8 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Mon, 22 Mar 2021 10:08:16 +0800 Subject: [PATCH 29/35] doc: update todo doc Signed-off-by: ywc689 --- doc/TODO.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/doc/TODO.md b/doc/TODO.md index b7b9d675c..7961406da 100644 --- a/doc/TODO.md +++ b/doc/TODO.md @@ -6,24 +6,29 @@ DPVS TODO list * [ ] NIC without Flow-Director (FDIR) - [x] Packet redirect to workers - [ ] RSS pre-calcuating + - [ ] Replace fdir with Generic Flow(rte_flow) * [x] Merge DPDK stable 18.11 * [ ] Merge DPDK stable 20.11 -* [ ] Service whitelist ACL -* [ ] SNAT ACL +* [x] Service whitelist ACL +* [ ] IPset Support + - [ ] SNAT ACL with IPset + - [ ] TC policing with IPset * [x] Refactor Keepalived (porting latest stable keepalived) +* [ ] Keepalived stability test and optimization. 
* [x] Packet Capture and Tcpdump Support * [ ] Logging - [ ] Packet based logging - [ ] Session based logging (creation, expire, statistics) -* [ ] CI, Test Automation Setup +* [x] CI, Test Automation Setup * [ ] Performance Optimization + - [ ] Performance test tools and docs - [x] CPU Performance Tuning - [x] Memory Performance Tuning - [ ] Numa-aware NIC - [ ] Minimal Running Resource - [x] KNI performance Tuning - [ ] Multi-core Performance Tuning - - [ ] TC performance Tuning + - [x] TC performance Tuning * [x] 25G/40G NIC Supports * [ ] VxLAN Support * [ ] IPv6 Tunnel Device From ab6ced5349913d70e30501dd158139868b414f66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=90=95=E9=80=B8=E5=87=A1=28lvyifan=29?= Date: Fri, 5 Mar 2021 11:08:41 +0800 Subject: [PATCH 30/35] keepalvied bug fix --- .../keepalived/check/check_daemon.c | 4 ++- .../keepalived/check/check_parser.c | 3 ++ tools/keepalived/keepalived/check/ipwrapper.c | 31 ++++++++++++++++++- .../keepalived/keepalived/include/ipwrapper.h | 1 + 4 files changed, 37 insertions(+), 2 deletions(-) diff --git a/tools/keepalived/keepalived/check/check_daemon.c b/tools/keepalived/keepalived/check/check_daemon.c index d0619f785..06218afa6 100644 --- a/tools/keepalived/keepalived/check/check_daemon.c +++ b/tools/keepalived/keepalived/check/check_daemon.c @@ -224,8 +224,10 @@ checker_terminate_phase1(bool schedule_next_thread) if (global_data->lvs_flush_onstop == LVS_FLUSH_FULL) { log_message(LOG_INFO, "Flushing lvs on shutdown in oneshot"); ipvs_flush_cmd(); - } else + } else { clear_services(); + clear_tunnels(); + } } if (schedule_next_thread) { diff --git a/tools/keepalived/keepalived/check/check_parser.c b/tools/keepalived/keepalived/check/check_parser.c index 0ec32a225..a59e22d09 100644 --- a/tools/keepalived/keepalived/check/check_parser.c +++ b/tools/keepalived/keepalived/check/check_parser.c @@ -996,6 +996,9 @@ whtlst_gname_handler(const vector_t *strvec) static void tunnel_handler(const vector_t *strvec) { + if 
(!strvec) + return; + alloc_tunnel(vector_slot(strvec, 1)); } diff --git a/tools/keepalived/keepalived/check/ipwrapper.c b/tools/keepalived/keepalived/check/ipwrapper.c index 5d74ec2f1..9ac45a284 100755 --- a/tools/keepalived/keepalived/check/ipwrapper.c +++ b/tools/keepalived/keepalived/check/ipwrapper.c @@ -303,6 +303,22 @@ clear_service_vs(virtual_server_t * vs, bool stopping) UNSET_ALIVE(vs); } +static void +clear_laddr_group(local_addr_group *laddr_group, virtual_server_t *vs) +{ + element e; + local_addr_entry *laddr_entry; + + LIST_FOREACH(laddr_group->addr_ip, laddr_entry, e) { + if (!ipvs_laddr_remove_entry(vs, laddr_entry)) + return; + } + LIST_FOREACH(laddr_group->range, laddr_entry, e) { + if (!ipvs_laddr_remove_entry(vs, laddr_entry)) + return; + } +} + /* IPVS cleaner processing */ void clear_services(void) @@ -312,11 +328,15 @@ clear_services(void) element e; virtual_server_t *vs; + local_addr_group *laddr_group; if (!check_data || !check_data->vs) return; LIST_FOREACH(check_data->vs, vs, e) { + laddr_group = ipvs_get_laddr_group_by_name(vs->local_addr_gname, + check_data->laddr_group); + clear_laddr_group(laddr_group, vs); /* Remove the real servers, and clear the vs unless it is * using a VS group and it is not the last vs of the same * protocol or address family using the group. 
*/ @@ -324,7 +344,6 @@ clear_services(void) } } -/* Set a realserver IPVS rules */ static bool init_service_rs(virtual_server_t * vs) { @@ -1470,3 +1489,13 @@ int clear_diff_tunnel(void) return IPVS_SUCCESS; } +void +clear_tunnels(void) +{ + element e; + tunnel_group *group; + + LIST_FOREACH(check_data->tunnel_group, group, e) { + clear_tunnel_group(group); + } +} \ No newline at end of file diff --git a/tools/keepalived/keepalived/include/ipwrapper.h b/tools/keepalived/keepalived/include/ipwrapper.h index af81b9cfd..df7726673 100755 --- a/tools/keepalived/keepalived/include/ipwrapper.h +++ b/tools/keepalived/keepalived/include/ipwrapper.h @@ -55,6 +55,7 @@ extern void set_checker_state(checker_t *, bool); extern void update_svr_checker_state(bool, checker_t *); extern bool init_services(void); extern void clear_services(void); +extern void clear_tunnels(void); extern void set_quorum_states(void); extern void clear_diff_services(list); extern void check_new_rs_state(void); From 632ea8bf9f9a47b723ea6e537f509ed7c850c773 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Tue, 13 Apr 2021 11:49:29 +0800 Subject: [PATCH 31/35] tc: remove duplicated functions --- include/linux_ipv6.h | 12 ------------ src/tc/cls_match.c | 8 ++++---- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/include/linux_ipv6.h b/include/linux_ipv6.h index c25baa319..05bc4ae3b 100644 --- a/include/linux_ipv6.h +++ b/include/linux_ipv6.h @@ -266,18 +266,6 @@ static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr return memcmp(a1, a2, sizeof(struct in6_addr)); } -static inline int ipv6_addr_cmp_u128(const struct in6_addr *a1, const struct in6_addr *a2) -{ - uint8_t *p1, *p2; - - for (p1 = (uint8_t *)a1, p2 = (uint8_t *)a2; p1 - (uint8_t *)a1 < 16; p1++, p2++) { - if (*p1 != *p2) - return *p1 > *p2 ? 
1 : -1; - } - - return 0; -} - static inline bool ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, const struct in6_addr *a2) diff --git a/src/tc/cls_match.c b/src/tc/cls_match.c index ebbdbbae8..0a2772769 100644 --- a/src/tc/cls_match.c +++ b/src/tc/cls_match.c @@ -119,14 +119,14 @@ static int match_classify(struct tc_cls *cls, struct rte_mbuf *mbuf, ip6h = rte_pktmbuf_mtod_offset(mbuf, struct ip6_hdr *, offset); if (!ipv6_addr_any(&m->srange.max_addr.in6)) { - if (ipv6_addr_cmp_u128(&ip6h->ip6_src, &m->srange.min_addr.in6) < 0 || - ipv6_addr_cmp_u128(&ip6h->ip6_src, &m->srange.max_addr.in6) > 0) + if (ipv6_addr_cmp(&ip6h->ip6_src, &m->srange.min_addr.in6) < 0 || + ipv6_addr_cmp(&ip6h->ip6_src, &m->srange.max_addr.in6) > 0) goto done; } if (!ipv6_addr_any(&m->drange.max_addr.in6)) { - if (ipv6_addr_cmp_u128(&ip6h->ip6_dst, &m->drange.min_addr.in6) < 0 || - ipv6_addr_cmp_u128(&ip6h->ip6_dst, &m->drange.max_addr.in6) > 0) + if (ipv6_addr_cmp(&ip6h->ip6_dst, &m->drange.min_addr.in6) < 0 || + ipv6_addr_cmp(&ip6h->ip6_dst, &m->drange.max_addr.in6) > 0) goto done; } From 3a12c50ace97d49deaecb7bc0ee7e05152708b49 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Tue, 13 Apr 2021 14:54:31 +0800 Subject: [PATCH 32/35] fix logging problem for debug level Signed-off-by: ywc689 --- src/global_conf.c | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/src/global_conf.c b/src/global_conf.c index 1447e313a..8fcff26fa 100644 --- a/src/global_conf.c +++ b/src/global_conf.c @@ -31,28 +31,42 @@ static void log_current_time(void) RTE_LOG(INFO, CFG_FILE, "load dpvs configuation file at %s\n", buf); } +static inline void set_log_level_dynamic_types(const char *regex, uint32_t level) +{ +#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) + rte_log_set_level_regexp(regex, level); +#endif +} + static int set_log_level(char *log_level) { if (!log_level) { rte_log_set_global_level(RTE_LOG_DEBUG); - 
rte_log_set_level_regexp("user[0-9]", RTE_LOG_DEBUG); - } else if (!strncmp(log_level, "EMERG", strlen("EMERG"))) + set_log_level_dynamic_types("user[0-9]", RTE_LOG_DEBUG); + } else if (!strncmp(log_level, "EMERG", strlen("EMERG"))) { rte_log_set_global_level(RTE_LOG_EMERG); - else if (!strncmp(log_level, "ALERT", strlen("ALERT"))) + set_log_level_dynamic_types("user[0-9]", RTE_LOG_EMERG); + } else if (!strncmp(log_level, "ALERT", strlen("ALERT"))) { rte_log_set_global_level(RTE_LOG_ALERT); - else if (!strncmp(log_level, "CRIT", strlen("CRIT"))) + set_log_level_dynamic_types("user[0-9]", RTE_LOG_ALERT); + } else if (!strncmp(log_level, "CRIT", strlen("CRIT"))) { rte_log_set_global_level(RTE_LOG_CRIT); - else if (!strncmp(log_level, "ERR", strlen("ERR"))) + set_log_level_dynamic_types("user[0-9]", RTE_LOG_CRIT); + } else if (!strncmp(log_level, "ERR", strlen("ERR"))) { rte_log_set_global_level(RTE_LOG_ERR); - else if (!strncmp(log_level, "WARNING", strlen("WARNING"))) + set_log_level_dynamic_types("user[0-9]", RTE_LOG_ERR); + } else if (!strncmp(log_level, "WARNING", strlen("WARNING"))) { rte_log_set_global_level(RTE_LOG_WARNING); - else if (!strncmp(log_level, "NOTICE", strlen("NOTICE"))) + set_log_level_dynamic_types("user[0-9]", RTE_LOG_WARNING); + } else if (!strncmp(log_level, "NOTICE", strlen("NOTICE"))) { rte_log_set_global_level(RTE_LOG_NOTICE); - else if (!strncmp(log_level, "INFO", strlen("INFO"))) + set_log_level_dynamic_types("user[0-9]", RTE_LOG_NOTICE); + } else if (!strncmp(log_level, "INFO", strlen("INFO"))) { rte_log_set_global_level(RTE_LOG_INFO); - else if (!strncmp(log_level, "DEBUG", strlen("DEBUG"))) { + set_log_level_dynamic_types("user[0-9]", RTE_LOG_INFO); + } else if (!strncmp(log_level, "DEBUG", strlen("DEBUG"))) { rte_log_set_global_level(RTE_LOG_DEBUG); - rte_log_set_level_regexp("user[0-9]", RTE_LOG_DEBUG); + set_log_level_dynamic_types("user[0-9]", RTE_LOG_DEBUG); } else { RTE_LOG(WARNING, CFG_FILE, "%s: illegal log level: %s\n", 
__func__, log_level); From 762fe1b6f20f682a9f13be8b6a08db4beb493d72 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Thu, 15 Apr 2021 21:01:55 +0800 Subject: [PATCH 33/35] version: release v1.8.9 --- src/VERSION | 54 ++++++++++++++++++++++------------------------------- 1 file changed, 22 insertions(+), 32 deletions(-) diff --git a/src/VERSION b/src/VERSION index 3dada72ad..66ebe6e21 100755 --- a/src/VERSION +++ b/src/VERSION @@ -1,43 +1,33 @@ #!/bin/sh - # program: dpvs -# Jan 7, 2021 +# Apr 15, 2021 # -# # Features +# Features # ---------- -# - Dpvs: Configurable kni worker type. -# - Dpvs: Support mh(megalev hash) scheduling. -# - Dpvs: Udp one-packet forwarding. -# - Dpvs: Dpdk-pdump for package capture. -# - Dpvs: Support examination of eal memory(ring/seg/zone/pool) usage. -# - Dpvs: Sapool memory optimization. -# - Dpvs: Support ENA driver on AWS -# - Dpvs: Some code refactorings, including sockopt definitions, jobs initializations, main function, etc. -# - Update some documents. -# - Delete some useless codes. +# - CI: Enable CI workflow. +# - Dpvs: TC stability and performance enhancement. +# - Dpvs: TC supoorts ipv6 and ingress traffic. +# - Dpvs: Add document and examples for dpvs tc. +# - Dpvs: Add supports for ipvs whitelist. +# - Dpvs: Support icmp forwarding with icmp_fwd_core. +# - Dpvs: Support mtu config. +# - Dpvs: Obsolete dpdk 16.07 and 17.05.02. +# - Dpdk: Add eal memory debug patch for dpdk-stable-18.11.2. # # # Bugfix # -------- -# - Dpvs: Fix lcore packet statistics loss problem on isol_rx_lcore. -# - Dpvs: Fix per-lcore mulitcast address initialization problem. -# - Dpvs: Fix ifa::tstemp timer problem when updating ifa entry. -# - Dpvs: Fix ifa adding fail problem by reusing expired ifa. -# - Dpvs: Fix memory leak problem when mbuf is invalid. -# - Dpvs: Fix problem that rs cannot get client's real ip for some short tcp connections. -# - Dpvs: Fix unnecessary memory waste caused by incorrect setting of rte_mempool size. 
-# - Dpvs: Fix connection limitation problem caused by incorrect rs connection statistics. -# - Dpvs: Fix illegal instruction problem by using the same RTE_MACHINE and cpu CFLAGS as dpdk. -# - Dpvs: Fix mismatch problem when deleting blacklist addresses. -# - Dpvs: Fix problem that mbuf may be used after released. -# - Keepalived: Conhash scheduler performance optimization. -# - Keepalived: Use correct return value for netlink_route. -# - Keepalived: Fix compile error with kernel 3.11 and above. -# - Keepalived: Fix too many open file descriptor problem for tcp_check health check. -# - Keepailved: Fix epoll_wait error after keepalived running for a long time. -# - Ipvsadm: Fix compile warnings of format-overflow and restrict. -# - Ipvsadm: Fix `ipvsadm -ln` show null error. -# - Toa: Fix problem when getting/setting pte writable flag. +# - Dpvs: Fix traceroute problem of dpvs ip address. +# - Dpvs: Fix flags conflicts for ipvs conn/service/dest. +# - Dpvs: Reset tcp connection when syn-cookie check fails. +# - Dpvs: Use correct mbuf:l4_len for checkout offload. +# - Dpvs: Fix udp checksum problem for uoa when checksum offload is off. +# - Dpvs: Simplify checksum calculations and remove superfluous checksum functions. +# - Dpvs: Refactor netif recv procedure. +# - Keepalived: Fix problem that local ip config doesn't take effect when restart. +# - Keepalived: Fix crash problem when tunnel is configured. +# - Dpvs: Fix debug level log problem. 
export VERSION=1.8 -export RELEASE=8 +export RELEASE=9 echo $VERSION-$RELEASE From b6f1a60807d085ad26f93ae4045ae3ad7a7b86c8 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Mon, 26 Apr 2021 14:24:28 +0800 Subject: [PATCH 34/35] doc: fix examples for using tc Signed-off-by: ywc689 --- doc/tc.md | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/doc/tc.md b/doc/tc.md index 270ecd9c4..086b0e177 100644 --- a/doc/tc.md +++ b/doc/tc.md @@ -105,7 +105,7 @@ dpip link set dpdk0 tc-ingress on # enable tc-ingress for dpdk0 ``` You can verify if TC for dpdk0 is enabled by checking if "tc-egress" or "tc-ingress" flag exists in the output of the command `dpip link show dpdk0`. -> It's safe to enable or disable TC of a device anytime you like, even if when TC is processing packets. +> It's safe to enable or disable TC of a device whenever you like, even if when TC is processing packets. **2. Add a root Qsch object.** @@ -140,25 +140,33 @@ qsch tbf 1: dev dpdk0 parent root flags 0x0 cls 0 rate 10.00Mbps burst 10000B li # Check Cls on Qsch root [root@dpvs-test]# dpip cls show dev dpdk0 qsch root -cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: +cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: # Check Cls on Qsch ingress [root@dpvs-test]# dpip cls show dev dpdk0 qsch ingress -cls match 8003: dev dpdk0 qsch ingress pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: +cls match 8002: dev dpdk0 qsch ingress pkttype 0x0800 prio 0 ICMP,iif=dpdk0 target 2: -# Get statistics of Qsch 2:0 +# Get statistics of Qsch +[root@dpvs-test]# dpip qsch -s show dev dpdk0 handle 1: +qsch tbf 1: dev dpdk0 parent root flags 0x0 cls 0 rate 10.00Mbps burst 10000B limit 2500B + Sent 4050639 bytes 46334 pkts (dropped 0, overlimits 0 requeues 0) + Backlog 0 bytes 0 pkts [root@dpvs-test]# dpip qsch -s show dev dpdk0 handle 2: qsch bfifo 2: dev dpdk0 parent ingress flags 0x1 cls 0 limit 100000 - Sent 17308172 
bytes 176614 pkts (dropped 0, overlimits 0 requeues 0) + Sent 980 bytes 10 pkts (dropped 0, overlimits 0 requeues 0) Backlog 0 bytes 0 pkts # Get per-lcore config of Cls 8001: [root@dpvs-test]# dpip cls -v show dev dpdk0 qsch root handle 8001: -[00] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: -[01] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: -[02] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: -[03] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: -[04] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 ICMP,iif-dpdk0 target 2: +[00] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +[01] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +[02] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +[04] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +[03] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +[05] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +[06] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +[07] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: +[08] cls match 8001: dev dpdk0 qsch root pkttype 0x0800 prio 0 TCP,oif=dpdk0 target 1: ``` From 9bb9a73116632e70f720cafef2fe71ee10b1bc21 Mon Sep 17 00:00:00 2001 From: ywc689 Date: Mon, 26 Apr 2021 14:30:11 +0800 Subject: [PATCH 35/35] version: release v1.8.10 Signed-off-by: ywc689 --- src/VERSION | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/VERSION b/src/VERSION index 66ebe6e21..76f9440e2 100755 --- a/src/VERSION +++ b/src/VERSION @@ -1,18 +1,18 @@ #!/bin/sh - # program: dpvs -# Apr 15, 2021 +# Apr 26, 2021 # # Features # ---------- # - CI: Enable CI workflow. 
# - Dpvs: TC stability and performance enhancement. -# - Dpvs: TC supoorts ipv6 and ingress traffic. +# - Dpvs: TC supports ipv6 and ingress traffic. # - Dpvs: Add document and examples for dpvs tc. # - Dpvs: Add supports for ipvs whitelist. # - Dpvs: Support icmp forwarding with icmp_fwd_core. # - Dpvs: Support mtu config. # - Dpvs: Obsolete dpdk 16.07 and 17.05.02. -# - Dpdk: Add eal memory debug patch for dpdk-stable-18.11.2. +# - Patch: Add eal memory debug patch for dpdk-stable-18.11.2. # # # Bugfix # -------- @@ -23,11 +23,11 @@ # - Dpvs: Fix udp checksum problem for uoa when checksum offload is off. # - Dpvs: Simplify checksum calculations and remove superfluous checksum functions. # - Dpvs: Refactor netif recv procedure. +# - Dpvs: Fix debug level log problem. # - Keepalived: Fix problem that local ip config doesn't take effect when restart. # - Keepalived: Fix crash problem when tunnel is configured. -# - Dpvs: Fix debug level log problem. export VERSION=1.8 -export RELEASE=9 +export RELEASE=10 echo $VERSION-$RELEASE