author    Maciej Fijalkowski <maciej.fijalkowski@intel.com>    2021-12-13 18:31:06 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2021-12-29 14:28:41 +0300
commit    ad6d20da2cfbe14b7b1200d15f39e65988b0b9e8 (patch)
tree      b1fe7884eb91a9e416b0885c4d61fa646f9db3a5 /drivers/net/ethernet
parent    c1c36df0b0a5f45eb5cf2ce594149120d7fa7d59 (diff)
download  linux-ad6d20da2cfbe14b7b1200d15f39e65988b0b9e8.tar.xz
ice: xsk: return xsk buffers back to pool when cleaning the ring
[ Upstream commit afe8a3ba85ec2a6b6849367e25c06a2f8e0ddd05 ]

Currently we only NULL the xdp_buff pointer in the internal SW ring but
we never give it back to the xsk buffer pool. This means that buffers
can be leaked out of the buff pool and never be used again.

Add missing xsk_buff_free() call to the routine that is supposed to
clean the entries that are left in the ring so that these buffers in
the umem can be used by other sockets.

Also, only go through the space that is actually left to be cleaned
instead of a whole ring.

Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Kiran Bhandare <kiranx.bhandare@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index f4ab5259a56c..37c7dc6b44a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -810,14 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
 {
-	u16 i;
-
-	for (i = 0; i < rx_ring->count; i++) {
-		struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+	u16 count_mask = rx_ring->count - 1;
+	u16 ntc = rx_ring->next_to_clean;
+	u16 ntu = rx_ring->next_to_use;
 
-		if (!xdp)
-			continue;
+	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+		struct xdp_buff **xdp = &rx_ring->xdp_buf[ntc];
 
+		xsk_buff_free(*xdp);
 		*xdp = NULL;
 	}
 }
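
For readability, here is how the function reads once the hunk above is applied: a sketch assembled from the diff, with explanatory comments added. The field and helper names (xdp_buf, next_to_clean, next_to_use, xsk_buff_free()) are the ones visible in the hunk; nothing outside the hunk is assumed.

/* Sketch of ice_xsk_clean_rx_ring() after this patch: release every
 * xsk buffer still held by the Rx ring back to the buffer pool.
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	/* Walk only the entries that actually hold buffers, i.e. the
	 * range [next_to_clean, next_to_use); the mask wraps the index
	 * around the ring (equivalent to modulo when the ring size is a
	 * power of two).
	 */
	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff **xdp = &rx_ring->xdp_buf[ntc];

		/* Hand the buffer back to the xsk buffer pool so its umem
		 * frame can be reused, then drop our reference to it.
		 */
		xsk_buff_free(*xdp);
		*xdp = NULL;
	}
}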