author     Christoph Hellwig <hch@lst.de>    2020-04-14 10:42:24 +0300
committer  Jens Axboe <axboe@kernel.dk>      2020-04-22 19:47:35 +0300
commit     cc97923a5bccc776851c242b61015faf288d5c22 (patch)
tree       d6510d4f326674e96bdf70c4f07472f492537db8 /block
parent     0475bd6c65976c390e3805a1e5f10fc30ca8def2 (diff)
download   linux-cc97923a5bccc776851c242b61015faf288d5c22.tar.xz
block: move dma drain handling to scsi
Don't burden the common block code with the specifics of the libata DMA draining mechanism. Instead move most of the code to the SCSI midlayer. That also means the nr_phys_segments adjustments in the blk-mq fast path can go away entirely, given that SCSI never looks at nr_phys_segments after mapping the request to a scatterlist.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
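The SCSI-side counterpart of this move lies outside the block/ diffstat below. As a rough sketch of the shape it takes, assuming the midlayer gains per-device dma_drain_buf/dma_drain_len fields and a drain-needed decision supplied by the LLDD (names and placement here are illustrative, not quoted from this commit):

/*
 * Sketch only: the SCSI midlayer appends the drain element itself after
 * mapping the request, instead of the block layer doing it.  The
 * sdev->dma_drain_buf / sdev->dma_drain_len fields and the need_drain
 * decision are assumptions for illustration, not taken from this diff.
 */
static int scsi_map_with_drain(struct scsi_cmnd *cmd, struct scsi_device *sdev,
			       struct request *rq, bool need_drain)
{
	struct scatterlist *last_sg = NULL;
	int count;

	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);

	if (need_drain) {
		/* writes must not leak stale drain-buffer data to the device */
		if (op_is_write(req_op(rq)))
			memset(sdev->dma_drain_buf, 0, sdev->dma_drain_len);

		sg_unmark_end(last_sg);
		last_sg = sg_next(last_sg);
		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
		sg_mark_end(last_sg);

		rq->extra_len += sdev->dma_drain_len;
		count++;
	}

	/*
	 * SCSI keeps the mapped count here and never reads
	 * rq->nr_phys_segments again, which is why the +1/-1 bookkeeping
	 * removed from blk-mq below is no longer needed.
	 */
	cmd->sdb.table.nents = count;
	return count;
}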
Diffstat (limited to 'block')
-rw-r--r--  block/blk-merge.c     14
-rw-r--r--  block/blk-mq.c        11
-rw-r--r--  block/blk-settings.c  37
3 files changed, 0 insertions, 62 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ee618cdb141e..25f5a5e00ee6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -539,20 +539,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		rq->extra_len += pad_len;
	}

-	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-		if (op_is_write(req_op(rq)))
-			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
-
-		sg_unmark_end(*last_sg);
-		*last_sg = sg_next(*last_sg);
-		sg_set_page(*last_sg, virt_to_page(q->dma_drain_buffer),
-			    q->dma_drain_size,
-			    ((unsigned long)q->dma_drain_buffer) &
-			    (PAGE_SIZE - 1));
-		nsegs++;
-		rq->extra_len += q->dma_drain_size;
-	}
-
	if (*last_sg)
		sg_mark_end(*last_sg);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cf95e8e0881a..2c105cb2a75b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -667,15 +667,6 @@ void blk_mq_start_request(struct request *rq)
	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

-	if (q->dma_drain_size && blk_rq_bytes(rq)) {
-		/*
-		 * Make sure space for the drain appears.  We know we can do
-		 * this because max_hw_segments has been adjusted to be one
-		 * fewer than the device can handle.
-		 */
-		rq->nr_phys_segments++;
-	}
-
#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
@@ -695,8 +686,6 @@ static void __blk_mq_requeue_request(struct request *rq)
	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
-		if (q->dma_drain_size && blk_rq_bytes(rq))
-			rq->nr_phys_segments--;
	}
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 14397b4c4b53..2ab1967b9716 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -652,43 +652,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
- * @q: the request queue for the device
- * @dma_drain_needed: fn which returns non-zero if drain is necessary
- * @buf: physically contiguous buffer
- * @size: size of the buffer in bytes
- *
- * Some devices have excess DMA problems and can't simply discard (or
- * zero fill) the unwanted piece of the transfer. They have to have a
- * real area of memory to transfer it into. The use case for this is
- * ATAPI devices in DMA mode. If the packet command causes a transfer
- * bigger than the transfer size some HBAs will lock up if there
- * aren't DMA elements to contain the excess transfer. What this API
- * does is adjust the queue so that the buf is always appended
- * silently to the scatterlist.
- *
- * Note: This routine adjusts max_hw_segments to make room for appending
- * the drain buffer. If you call blk_queue_max_segments() after calling
- * this routine, you must set the limit to one fewer than your device
- * can support otherwise there won't be room for the drain buffer.
- */
-int blk_queue_dma_drain(struct request_queue *q,
-			       dma_drain_needed_fn *dma_drain_needed,
-			       void *buf, unsigned int size)
-{
-	if (queue_max_segments(q) < 2)
-		return -EINVAL;
-	/* make room for appending the drain */
-	blk_queue_max_segments(q, queue_max_segments(q) - 1);
-	q->dma_drain_needed = dma_drain_needed;
-	q->dma_drain_buffer = buf;
-	q->dma_drain_size = size;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
-
-/**
* blk_queue_segment_boundary - set boundary rules for segment merging
* @q: the request queue for the device
* @mask: the memory boundary mask
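
The kernel-doc removed above describes the one real user of this API: ATAPI devices in DMA mode. For reference, a sketch of roughly how libata's ata_scsi_dev_config() consumed blk_queue_dma_drain() before this change; ATAPI_MAX_DRAIN, ATA_DMA_PAD_SZ and atapi_drain_needed are libata names, but the helper itself is hypothetical and only illustrates the calling convention:

/*
 * Usage sketch of the removed API, modelled on libata's ATAPI setup;
 * illustrative, not verbatim from the libata source.
 */
static int atapi_setup_drain(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	void *buf;

	/* pad short ATAPI transfers up to the controller's DMA alignment */
	blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);

	/* a real, physically contiguous area the HBA can drain excess data into */
	buf = kmalloc(ATAPI_MAX_DRAIN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * Reserves one scatterlist segment: blk_queue_dma_drain() lowers
	 * max_segments by one, per the note in the removed kernel-doc.
	 */
	return blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
}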