author		Ciara Loftus <ciara.loftus@intel.com>	2022-06-23 10:08:52 +0000
committer	Jakub Kicinski <kuba@kernel.org>	2022-06-24 16:37:12 -0700
commit		78f319315764e6730c143570609b230d1bea6928 (patch)
tree		016502dae683bffb1494d27ba027f0bb92fae412 /drivers/net/ethernet/intel/i40e/i40e_txrx.c
parent		85a1c6536f9939696f687d1b0996167e38998fb9 (diff)
i40e: read the XDP program once per NAPI
Similar to what the ice driver has done since commit eb087cd82864 ("ice:
propagate xdp_ring onto rx_ring"), read the XDP program once per NAPI poll
instead of once per cleaned descriptor. I measured a throughput improvement
of 2% for the AF_XDP xdpsock l2fwd benchmark in zero-copy mode and 1% in
copy mode.
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Link: https://lore.kernel.org/r/20220623100852.7867-1-ciara.loftus@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_txrx.c')
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_txrx.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index dadef56e5f9b..a327189deda0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2289,16 +2289,14 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
  * i40e_run_xdp - run an XDP program
  * @rx_ring: Rx ring being processed
  * @xdp: XDP buffer containing the frame
+ * @xdp_prog: XDP program to run
  **/
-static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
         int err, result = I40E_XDP_PASS;
         struct i40e_ring *xdp_ring;
-        struct bpf_prog *xdp_prog;
         u32 act;
 
-        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
         if (!xdp_prog)
                 goto xdp_out;
 
@@ -2443,6 +2441,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
         unsigned int offset = rx_ring->rx_offset;
         struct sk_buff *skb = rx_ring->skb;
         unsigned int xdp_xmit = 0;
+        struct bpf_prog *xdp_prog;
         bool failure = false;
         struct xdp_buff xdp;
         int xdp_res = 0;
@@ -2452,6 +2451,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 #endif
         xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
 
+        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
         while (likely(total_rx_packets < (unsigned int)budget)) {
                 struct i40e_rx_buffer *rx_buffer;
                 union i40e_rx_desc *rx_desc;
@@ -2512,7 +2513,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                         /* At larger PAGE_SIZE, frame_sz depend on len size */
                         xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
 #endif
-                        xdp_res = i40e_run_xdp(rx_ring, &xdp);
+                        xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
                 }
 
                 if (xdp_res) {
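
For readers less familiar with the pattern, the change boils down to hoisting a single READ_ONCE() of the ring's program pointer out of the per-descriptor path and handing the cached pointer to the per-packet helper. The fragment below is a minimal user-space C sketch of that structure only; the names mini_prog, mini_ring, run_xdp, clean_rx_irq and the READ_ONCE_PTR macro are invented stand-ins for illustration and are not the driver's real definitions.

/*
 * Minimal sketch of the "read the XDP program once per NAPI poll" pattern.
 * All types and helpers here are simplified stand-ins, not i40e code.
 */
#include <stdio.h>

/* Crude stand-in for the kernel's READ_ONCE(): one volatile pointer load. */
#define READ_ONCE_PTR(p) (*(void * volatile *)&(p))

struct mini_prog {
        int verdict;                    /* pretend XDP verdict */
};

struct mini_ring {
        struct mini_prog *xdp_prog;     /* may be swapped by another context */
};

/* Per-packet helper: uses the program pointer cached by the caller
 * instead of re-reading ring->xdp_prog for every descriptor. */
static int run_xdp(int pkt, struct mini_prog *prog)
{
        if (!prog)
                return 0;               /* no program attached: pass */
        return prog->verdict + pkt;     /* pretend the program ran */
}

/* NAPI-style poll loop: the program pointer is loaded once, up front,
 * then reused for every packet handled in this poll. */
static void clean_rx_irq(struct mini_ring *ring, int budget)
{
        struct mini_prog *prog = READ_ONCE_PTR(ring->xdp_prog);

        for (int pkt = 0; pkt < budget; pkt++)
                printf("pkt %d -> verdict %d\n", pkt, run_xdp(pkt, prog));
}

int main(void)
{
        struct mini_prog prog = { .verdict = 3 };
        struct mini_ring ring = { .xdp_prog = &prog };

        clean_rx_irq(&ring, 4);
        return 0;
}

The trade-off this implies is that a program attached or detached mid-poll only takes effect on the next poll, which matches the per-NAPI granularity the commit message describes.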