author	Daniel Jurgens <danielj@mellanox.com>	2018-11-06 01:05:37 +0300
committer	Saeed Mahameed <saeedm@mellanox.com>	2018-12-12 01:52:20 +0300
commit	939de57d30344ce728b0de61be87984e75af420e (patch)
tree	dc42835752a817f639508f23b45b1601fd644861 /include/linux/mlx5/cq.h
parent	8c4dc42bf6e4ffeda49cf5e26bfc991b548fc0aa (diff)
download	linux-939de57d30344ce728b0de61be87984e75af420e.tar.xz
net/mlx5e: Use CQE padding for Ethernet CQs
Writing 64B CQEs to 128B cache lines results in an RMW (read-modify-write) operation on the full cache line. Padding the CQEs to 128B where possible avoids this and improves performance on 128B cache line systems such as PPC. Testing on PPC showed up to a 24% improvement in small packet throughput versus the default behavior, depending on the workload and system topology.

Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Diffstat (limited to 'include/linux/mlx5/cq.h')
-rw-r--r--	include/linux/mlx5/cq.h	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 28b757a64029..612c8c2f2466 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -125,9 +125,9 @@ struct mlx5_cq_modify_params {
 };
 
 enum {
-	CQE_SIZE_64 = 0,
-	CQE_SIZE_128 = 1,
-	CQE_SIZE_128_PAD = 2,
+	CQE_STRIDE_64 = 0,
+	CQE_STRIDE_128 = 1,
+	CQE_STRIDE_128_PAD = 2,
 };
 
 #define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
@@ -135,8 +135,8 @@ enum {
 
 static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
 {
-	return padding_128_en ? CQE_SIZE_128_PAD :
-			size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+	return padding_128_en ? CQE_STRIDE_128_PAD :
+			size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
 }
 
 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)