summaryrefslogtreecommitdiff
path: root/include/net/xsk_buff_pool.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/net/xsk_buff_pool.h')
-rw-r--r--include/net/xsk_buff_pool.h27
1 file changed, 15 insertions, 12 deletions
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 356d0ac74eba..38d03a64c9ea 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -39,9 +39,22 @@ struct xsk_dma_map {
};
struct xsk_buff_pool {
- struct xsk_queue *fq;
- struct xsk_queue *cq;
+ /* Members only used in the control path first. */
+ struct device *dev;
+ struct net_device *netdev;
+ struct list_head xsk_tx_list;
+ /* Protects modifications to the xsk_tx_list */
+ spinlock_t xsk_tx_list_lock;
+ refcount_t users;
+ struct xdp_umem *umem;
+ struct work_struct work;
struct list_head free_list;
+ u32 heads_cnt;
+ u16 queue_id;
+
+ /* Data path members as close to free_heads at the end as possible. */
+ struct xsk_queue *fq ____cacheline_aligned_in_smp;
+ struct xsk_queue *cq;
/* For performance reasons, each buff pool has its own array of dma_pages
* even when they are identical.
*/
@@ -51,25 +64,15 @@ struct xsk_buff_pool {
u64 addrs_cnt;
u32 free_list_cnt;
u32 dma_pages_cnt;
- u32 heads_cnt;
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
u32 frame_len;
- u16 queue_id;
u8 cached_need_wakeup;
bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
- struct xdp_umem *umem;
void *addrs;
- struct device *dev;
- struct net_device *netdev;
- struct list_head xsk_tx_list;
- /* Protects modifications to the xsk_tx_list */
- spinlock_t xsk_tx_list_lock;
- refcount_t users;
- struct work_struct work;
struct xdp_buff_xsk *free_heads[];
};