Skip to content

Commit

Permalink
Separate jumbo mbuf pool
Browse files Browse the repository at this point in the history
  • Loading branch information
PlagueCZ committed Sep 26, 2024
1 parent cfcfff7 commit fe1178c
Show file tree
Hide file tree
Showing 4 changed files with 48 additions and 10 deletions.
15 changes: 12 additions & 3 deletions include/dpdk_layer.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,16 +37,25 @@ extern "C" {
// there are three periodic messages (ARP, ND, ND-RA) that could be sent at once
#define DP_PERIODIC_Q_SIZE (DP_MAX_PORTS * 3)

// 40Gb/s with 1500B packets means ~9M packets/s
// assuming 0.1s delay in processing means ~900k mbufs needed
// 40Gb/s with 1500B packets means ~3.3M packets/s
// assuming 0.1s delay in processing means ~330k mbufs needed (pool rounded up to 350k)
#ifdef ENABLE_PYTEST
#define DP_MBUF_POOL_SIZE (50*1024)
#else
#define DP_MBUF_POOL_SIZE (900*1024)
#define DP_MBUF_POOL_SIZE (350*1024)
#endif
#define DP_MBUF_BUF_SIZE (1518 + RTE_PKTMBUF_HEADROOM)

#ifdef ENABLE_PF1_PROXY
#define DP_JUMBO_MBUF_POOL_SIZE (50*1024)
#define DP_JUMBO_MBUF_BUF_SIZE (9118 + RTE_PKTMBUF_HEADROOM)
#endif

struct dp_dpdk_layer {
struct rte_mempool *rte_mempool;
#ifdef ENABLE_PF1_PROXY
struct rte_mempool *rte_jumbo_mempool;
#endif
struct rte_ring *grpc_tx_queue;
struct rte_ring *grpc_rx_queue;
struct rte_ring *periodic_msg_queue;
Expand Down
13 changes: 10 additions & 3 deletions src/dp_port.c
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ static int dp_port_init_ethdev(struct dp_port *port, struct rte_eth_dev_info *de
struct rte_eth_rxconf rxq_conf;
struct rte_eth_conf port_conf = port_conf_default;
uint16_t nr_hairpin_queues;
struct rte_mempool *mempool;
int ret;

/* Default config */
Expand All @@ -128,10 +129,15 @@ static int dp_port_init_ethdev(struct dp_port *port, struct rte_eth_dev_info *de

/* RX and TX queues config */
for (uint16_t i = 0; i < DP_NR_STD_RX_QUEUES; ++i) {
mempool = dp_layer->rte_mempool;
#ifdef ENABLE_PF1_PROXY
if (dp_conf_is_pf1_proxy_enabled() && (port == dp_get_pf1() || port == &_dp_pf_proxy_tap_port))
mempool = dp_layer->rte_jumbo_mempool;
#endif
ret = rte_eth_rx_queue_setup(port->port_id, i, 1024,
port->socket_id,
&rxq_conf,
dp_layer->rte_mempool);
mempool);
if (DP_FAILED(ret)) {
DPS_LOG_ERR("Rx queue setup failed", DP_LOG_PORT(port), DP_LOG_RET(ret));
return DP_ERROR;
Expand Down Expand Up @@ -231,15 +237,16 @@ static struct dp_port *dp_port_init_interface(uint16_t port_id, struct rte_eth_d
port->socket_id = socket_id;
_dp_port_table[port_id] = port;

if (is_pf && DP_FAILED(dp_port_register_pf(port)))
return NULL;

if (DP_FAILED(dp_port_init_ethdev(port, dev_info)))
return NULL;

if (dp_conf_is_multiport_eswitch() && DP_FAILED(dp_configure_async_flows(port->port_id)))
return NULL;

if (is_pf) {
if (DP_FAILED(dp_port_register_pf(port)))
return NULL;
ret = rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC, dp_link_status_change_event_callback, NULL);
if (DP_FAILED(ret)) {
DPS_LOG_ERR("Cannot register link status callback", DP_LOG_RET(ret));
Expand Down
19 changes: 18 additions & 1 deletion src/dpdk_layer.c
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

#include "dpdk_layer.h"
#include <rte_graph_worker.h>
#include "dp_conf.h"
#include "dp_error.h"
#include "dp_graph.h"
#include "dp_log.h"
Expand Down Expand Up @@ -35,13 +36,26 @@ static int dp_dpdk_layer_init_unsafe(void)
{
dp_layer.rte_mempool = rte_pktmbuf_pool_create("mbuf_pool", DP_MBUF_POOL_SIZE,
DP_MEMPOOL_CACHE_SIZE, DP_MBUF_PRIV_DATA_SIZE,
RTE_MBUF_DEFAULT_BUF_SIZE,
DP_MBUF_BUF_SIZE,
rte_socket_id());
if (!dp_layer.rte_mempool) {
DPS_LOG_ERR("Cannot create mbuf pool", DP_LOG_RET(rte_errno));
return DP_ERROR;
}

#ifdef ENABLE_PF1_PROXY
if (dp_conf_is_pf1_proxy_enabled()) {
dp_layer.rte_jumbo_mempool = rte_pktmbuf_pool_create("jumbo_mbuf_pool", DP_JUMBO_MBUF_POOL_SIZE,
DP_MEMPOOL_CACHE_SIZE, DP_MBUF_PRIV_DATA_SIZE,
DP_JUMBO_MBUF_BUF_SIZE,
rte_socket_id());
if (!dp_layer.rte_jumbo_mempool) {
DPS_LOG_ERR("Cannot create jumbo mbuf pool", DP_LOG_RET(rte_errno));
return DP_ERROR;
}
}
#endif

dp_layer.num_of_vfs = dp_get_num_of_vfs();
if (DP_FAILED(dp_layer.num_of_vfs))
return DP_ERROR;
Expand Down Expand Up @@ -80,6 +94,9 @@ void dp_dpdk_layer_free(void)
ring_free(dp_layer.periodic_msg_queue);
ring_free(dp_layer.grpc_rx_queue);
ring_free(dp_layer.grpc_tx_queue);
#ifdef ENABLE_PF1_PROXY
rte_mempool_free(dp_layer.rte_jumbo_mempool);
#endif
rte_mempool_free(dp_layer.rte_mempool);
}

Expand Down
11 changes: 8 additions & 3 deletions src/monitoring/dp_graphtrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,14 @@ static int dp_graphtrace_init_memory(void)
// By sizing the pool to the ring buffer size minus one, allocation starts failing once the ring buffer is (almost) full
// (this is intentional, see below)
graphtrace.mempool = rte_pktmbuf_pool_create(DP_GRAPHTRACE_MEMPOOL_NAME, DP_GRAPHTRACE_RINGBUF_SIZE-1,
DP_MEMPOOL_CACHE_SIZE, DP_MBUF_PRIV_DATA_SIZE + sizeof(struct dp_graphtrace_pktinfo),
RTE_MBUF_DEFAULT_BUF_SIZE,
rte_socket_id());
DP_MEMPOOL_CACHE_SIZE,
DP_MBUF_PRIV_DATA_SIZE + sizeof(struct dp_graphtrace_pktinfo),
#ifdef ENABLE_PF1_PROXY
DP_JUMBO_MBUF_BUF_SIZE,
#else
DP_MBUF_BUF_SIZE,
#endif
rte_socket_id());
if (!graphtrace.mempool) {
DPS_LOG_ERR("Cannot allocate graphtrace pool", DP_LOG_RET(rte_errno));
return DP_ERROR;
Expand Down

0 comments on commit fe1178c

Please sign in to comment.