author     Ming Lin <ming.l@ssi.samsung.com>   2016-03-22 10:24:43 +0300
committer  Jens Axboe <axboe@fb.com>           2016-04-12 22:44:00 +0300
commit     58b45602751ddf16e57170656670aa5a8f78eeca (patch)
tree       e8bbe385e5f4fd4dea3139937db75b66fcf022a5 /drivers/nvme
parent     2e39e0f608c130411f52c9fe5648dbcda5e28528 (diff)
download   linux-58b45602751ddf16e57170656670aa5a8f78eeca.tar.xz
nvme: add helper nvme_map_len()
The helper returns the number of bytes that need to be mapped using
PRPs/SGL entries.

Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/nvme.h |  8
-rw-r--r--  drivers/nvme/host/pci.c  | 13
2 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f846da4eb338..6376cd71cc9f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -173,6 +173,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
+static inline unsigned nvme_map_len(struct request *rq)
+{
+ if (rq->cmd_flags & REQ_DISCARD)
+ return sizeof(struct nvme_dsm_range);
+ else
+ return blk_rq_bytes(rq);
+}
+
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 154194e33c31..d23ede73537d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -334,16 +334,11 @@ static __le64 **iod_list(struct request *req)
return (__le64 **)(iod->sg + req->nr_phys_segments);
}
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, unsigned size,
+ struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = rq->nr_phys_segments;
- unsigned size;
-
- if (rq->cmd_flags & REQ_DISCARD)
- size = sizeof(struct nvme_dsm_range);
- else
- size = blk_rq_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -637,6 +632,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_command cmnd;
+ unsigned map_len;
int ret = BLK_MQ_RQ_QUEUE_OK;
/*
@@ -652,7 +648,8 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- ret = nvme_init_iod(req, dev);
+ map_len = nvme_map_len(req);
+ ret = nvme_init_iod(req, map_len, dev);
if (ret)
return ret;
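
For reference, below is a minimal userspace sketch of the decision the new helper encodes. The struct layouts, the REQ_DISCARD bit, and the data_len field are simplified stand-ins rather than the real kernel definitions; the point is only that a discard maps a single nvme_dsm_range descriptor while any other request maps its payload bytes.

#include <stdio.h>

#define REQ_DISCARD (1u << 0)            /* stand-in flag bit, not the kernel value */

struct nvme_dsm_range {                  /* stand-in for the DSM range descriptor */
	unsigned int cattr;
	unsigned int nlb;
	unsigned long long slba;
};

struct request {                         /* reduced stand-in for struct request */
	unsigned int cmd_flags;
	unsigned int data_len;           /* what blk_rq_bytes() would report */
};

static unsigned nvme_map_len(const struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD)
		return sizeof(struct nvme_dsm_range);
	return rq->data_len;
}

int main(void)
{
	struct request write_rq   = { .cmd_flags = 0,           .data_len = 4096 };
	struct request discard_rq = { .cmd_flags = REQ_DISCARD, .data_len = 1 << 20 };

	/* A 4 KiB write maps its 4096 payload bytes. */
	printf("write:   map_len = %u\n", nvme_map_len(&write_rq));

	/* A discard maps only the small range descriptor, not the trimmed extent. */
	printf("discard: map_len = %u\n", nvme_map_len(&discard_rq));
	return 0;
}

In the patch itself this computation moves out of nvme_init_iod(): nvme_queue_rq() calls nvme_map_len() once and passes the result in as the iod allocation size, which keeps the discard special case in a single place.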