author     Jens Axboe <axboe@kernel.dk>  2021-10-18 19:12:12 +0300
committer  Jens Axboe <axboe@kernel.dk>  2021-10-19 18:21:42 +0300
commit     bc490f81731e181b07b8d7577425c06ae91692c8 (patch)
tree       abe26a3d3c309e8a5d62e61e50ccd8a13036aebb /block/blk-core.c
parent     480d42dc001bbfe953825a92073012fcd5a99161 (diff)
block: change plugging to use a singly linked list
Use a singly linked list for the blk_plug. This saves 8 bytes in the
blk_plug struct, and makes for faster list manipulations than doubly
linked lists. As we don't use the doubly linked lists for anything,
singly linked is just fine.

This yields a bump in default (merging enabled) performance from 7.0
to 7.1M IOPS, and ~7.5M IOPS with merging disabled.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
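For context, a minimal sketch of what the change amounts to. The field
and helper names below mirror the patch, but the definitions are
simplified illustrations rather than the kernel's exact code: the
plug's two-pointer struct list_head head becomes a single struct
request pointer, with requests chained through a next pointer.

/* Simplified sketch; illustrative, not the kernel's exact definitions. */
struct request {
	struct request *rq_next;	/* singly linked plug chain */
	/* ... */
};

struct blk_plug {
	struct request *mq_list;	/* was: struct list_head mq_list (two pointers) */
	struct request *cached_rq;
	unsigned short nr_ios;
	unsigned short rq_count;
	/* ... */
};

/* An empty list is just a NULL head, so the emptiness test is a plain
 * pointer comparison instead of list_empty()'s head->next check. */
#define rq_list_empty(list)	((list) == NULL)

/* Head insertion touches two pointers; a doubly linked list_add()
 * would have to update four. */
#define rq_list_add(listptr, rq)	do {	\
	(rq)->rq_next = *(listptr);		\
	*(listptr) = (rq);			\
} while (0)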
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index d0c2e11411d0..14d20909f61a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1550,7 +1550,7 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
 	if (tsk->plug)
 		return;
 
-	INIT_LIST_HEAD(&plug->mq_list);
+	plug->mq_list = NULL;
 	plug->cached_rq = NULL;
 	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
 	plug->rq_count = 0;
@@ -1640,7 +1640,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	flush_plug_callbacks(plug, from_schedule);
 
-	if (!list_empty(&plug->mq_list))
+	if (!rq_list_empty(plug->mq_list))
 		blk_mq_flush_plug_list(plug, from_schedule);
 	if (unlikely(!from_schedule && plug->cached_rq))
 		blk_mq_free_plug_rqs(plug);
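With the new representation, blk_start_plug_nr_ios() empties the list
by storing NULL, and blk_flush_plug_list() tests the head pointer with
rq_list_empty(). As a hedged illustration of draining such a chain
(example_flush_plug() and dispatch_request() are hypothetical
stand-ins, not the kernel's blk_mq_flush_plug_list()):

/* Illustrative drain loop over the singly linked plug list. */
static void example_flush_plug(struct blk_plug *plug)
{
	while (!rq_list_empty(plug->mq_list)) {
		struct request *rq = plug->mq_list;

		plug->mq_list = rq->rq_next;	/* pop the head */
		plug->rq_count--;
		dispatch_request(rq);		/* hypothetical dispatch hook */
	}
}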