author    Dragos Tatulea <dtatulea@nvidia.com>	2023-01-18 18:08:51 +0300
committer Saeed Mahameed <saeedm@nvidia.com>	2023-03-28 23:43:58 +0300
commit    6f5742846053c7656992e70726aad26a5129cf19 (patch)
tree      134986d30ef40e97f0b3e129a74bed539e4617df /drivers/net/ethernet/mellanox/mlx5/core/en.h
parent    4a5c5e25008f374e525178f5ba2581cfa3303a0c (diff)
net/mlx5e: RX, Enable skb page recycling through the page_pool
Start using the page_pool skb recycling api to recycle all pages back to
the page pool and stop using atomic page reference counting.

The mlx5e driver used to manage in-flight pages using page refcounting:
for each fragment there were 2 atomic write operations happening (one
for building the skb and one on skb release).

The page_pool api introduced a method to track page fragments more
optimally:
* The page's pp_frag_count is set to a large bias on page alloc
  (1 x atomic write operation).
* The driver tracks the actual page fragments in a non-atomic variable.
* When the skb is recycled, pp_frag_count is decremented
  (atomic write operation).
* When the page is released in the driver, the unused number of
  fragments (relative to the bias) is deducted from pp_frag_count
  (atomic write operation).
* The last page defragmentation will only be an atomic read.

So in total there are `number of fragments + 1` atomic write ops, as
opposed to `2 * frags` atomic write ops previously.

Pages are wrapped in a mlx5e_frag_page structure which also contains
the number of fragments. This makes it easy to count the fragments in
the driver (a sketch of the scheme follows this message).

This change brings performance improvements for the case when the old
rx page_cache had low recycling rates due to head-of-queue blocking.
For an iperf3 TCP test with a single stream, on a single core (iperf
and receive queue running on the same core), the following improvements
can be noticed:

* Striding rq:
  - before (net-next baseline): bitrate = 30.1 Gbits/sec
  - after                     : bitrate = 31.4 Gbits/sec (diff: 4.14 %)

* Legacy rq:
  - before (net-next baseline): bitrate = 30.2 Gbits/sec
  - after                     : bitrate = 33.0 Gbits/sec (diff: 8.48 %)

There are 2 temporary performance degradations introduced:

1) TCP streams that had a good recycling rate with the old page_cache
   see a degradation for both striding and linear rq. This is due to
   very low page pool cache recycling: the pages are released during
   skb recycle, which returns them to the page pool ring for safety.
   The following patches in this series tackle this problem by
   deferring the page release in the driver to increase the chance of
   having pages recycled to the cache.

2) XDP performance is now lower (4-5 %) due to the higher number of
   atomic operations used for fragment management. But this opens the
   door for supporting multiple packets per page in XDP, which will
   bring a big gain.

Otherwise, performance is similar to baseline.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
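The helpers below are a minimal sketch of the bias-based accounting the
message describes, written against the page_pool fragment API of this
kernel generation (page_pool_fragment_page(), page_pool_defrag_page()
and page_pool_put_defragged_page() are the stock API; the bias constant
FRAG_PAGE_BIAS_MAX and the frag_page_alloc()/frag_page_release() helper
names are illustrative, not the exact contents of this patch):

#include <net/page_pool.h>

#define FRAG_PAGE_BIAS_MAX	0x4000	/* illustrative large bias */

struct mlx5e_frag_page {
	struct page *page;
	u16 frags;		/* fragments handed out, non-atomic */
};

static int frag_page_alloc(struct page_pool *pool,
			   struct mlx5e_frag_page *frag_page)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (unlikely(!page))
		return -ENOMEM;

	/* Atomic write #1: seed pp_frag_count with the large bias. */
	page_pool_fragment_page(page, FRAG_PAGE_BIAS_MAX);

	frag_page->page = page;
	frag_page->frags = 0;
	return 0;
}

static void frag_page_release(struct page_pool *pool,
			      struct mlx5e_frag_page *frag_page)
{
	u16 unused = FRAG_PAGE_BIAS_MAX - frag_page->frags;

	/* Atomic write #2: deduct the unused part of the bias. If that
	 * was the last reference the page goes back to the pool here;
	 * otherwise in-flight skb frags drop it on skb recycle.
	 */
	if (page_pool_defrag_page(frag_page->page, unused) == 0)
		page_pool_put_defragged_page(pool, frag_page->page, -1, true);
}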
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en.h')
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2684e7af5a7a..0862556105d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -600,9 +600,14 @@ struct mlx5e_icosq {
 	struct work_struct recover_work;
 } ____cacheline_aligned_in_smp;
 
+struct mlx5e_frag_page {
+	struct page *page;
+	u16 frags;
+};
+
 struct mlx5e_wqe_frag_info {
 	union {
-		struct page **pagep;
+		struct mlx5e_frag_page *frag_page;
 		struct xdp_buff **xskp;
 	};
 	u32 offset;
@@ -610,6 +615,7 @@ struct mlx5e_wqe_frag_info {
 };
 
 union mlx5e_alloc_units {
+	DECLARE_FLEX_ARRAY(struct mlx5e_frag_page, frag_pages);
 	DECLARE_FLEX_ARRAY(struct page *, pages);
 	DECLARE_FLEX_ARRAY(struct xdp_buff *, xsk_buffs);
 };
@@ -666,7 +672,7 @@ struct mlx5e_rq_frags_info {
 struct mlx5e_dma_info {
 	dma_addr_t addr;
 	union {
-		struct page **pagep;
+		struct mlx5e_frag_page *frag_page;
 		struct page *page;
 	};
 };
@@ -674,7 +680,7 @@ struct mlx5e_dma_info {
 struct mlx5e_shampo_hd {
 	u32 mkey;
 	struct mlx5e_dma_info *info;
-	struct page **pages;
+	struct mlx5e_frag_page *pages;
 	u16 curr_page_index;
 	u16 hd_per_wq;
 	u16 hd_per_wqe;
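
For completeness, a hedged sketch of how an rx path might consume a
mlx5e_frag_page under this scheme. The frag_page_attach() helper is
hypothetical (the actual consumers are in later patches of the series),
but skb_add_rx_frag() and skb_mark_for_recycle() are the stock kernel
APIs the recycling path relies on:

#include <linux/skbuff.h>

/* Hypothetical rx-path helper: each fragment attached to an skb bumps
 * the non-atomic counter; marking the skb routes its release through
 * page_pool recycling instead of put_page().
 */
static void frag_page_attach(struct sk_buff *skb, int i,
			     struct mlx5e_frag_page *frag_page,
			     u32 offset, u32 len, unsigned int truesize)
{
	frag_page->frags++;
	skb_add_rx_frag(skb, i, frag_page->page, offset, len, truesize);
	skb_mark_for_recycle(skb);
}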