author    Josef Bacik <jbacik@fb.com>   2017-01-20 00:08:49 +0300
committer Jens Axboe <axboe@fb.com>     2017-01-20 00:31:50 +0300
commit    d61b7f972dab2a7d187c38254845546dfc8eed85 (patch)
tree      c9ee8c99e826639db76b38e5f8ed142e4cb1412f
parent    88a7503376f4f3bf303c809d1a389739e1205614 (diff)
download  linux-d61b7f972dab2a7d187c38254845546dfc8eed85.tar.xz
nbd: only set MSG_MORE when we have more to send
A user noticed that write performance was horrible over loopback and we
traced it to an inversion of when we need to set MSG_MORE. It should be
set when we have more bvec's to send, not when we are on the last bvec.
This patch made the test go from 20 iops to 78k iops.

Signed-off-by: Josef Bacik <jbacik@fb.com>
Fixes: 429a787be679 ("nbd: fix use-after-free of rq/bio in the xmit path")
Signed-off-by: Jens Axboe <axboe@fb.com>
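For context: MSG_MORE is a hint to the socket layer that more data will follow on this socket, so it may coalesce segments instead of pushing a packet per send. Below is a minimal sketch of the pattern the patch restores, with a hypothetical send_one() helper standing in for the driver's sock_send_bvec(); it is illustrative only and not part of the patch:

#include <linux/types.h>	/* bool */
#include <linux/socket.h>	/* MSG_MORE */
#include <linux/uio.h>		/* struct kvec */

/* Hypothetical send helper, standing in for sock_send_bvec(). */
static int send_one(struct kvec *vec, int flags);

static int send_segments(struct kvec *vec, int nr_segs)
{
	int i;

	for (i = 0; i < nr_segs; i++) {
		bool is_last = (i == nr_segs - 1);
		/* Hint "more is coming" on every segment except the last. */
		int flags = is_last ? 0 : MSG_MORE;
		int ret = send_one(&vec[i], flags);

		if (ret < 0)
			return ret;
	}
	return 0;
}

Setting the flag the other way around, as the old code did, defeats the batching and flushes a packet per bvec, which is what made loopback writes so slow.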
 drivers/block/nbd.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 50a2020b5b72..9fd06eeb1a17 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
-	int result, flags;
+	int result;
 	struct nbd_request request;
 	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	if (type != NBD_CMD_WRITE)
 		return 0;
 
-	flags = 0;
 	bio = req->bio;
 	while (bio) {
 		struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
 		bio_for_each_segment(bvec, bio, iter) {
 			bool is_last = !next && bio_iter_last(bvec, iter);
+			int flags = is_last ? 0 : MSG_MORE;
 
-			if (is_last)
-				flags = MSG_MORE;
 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
 				cmd, bvec.bv_len);
 			result = sock_send_bvec(nbd, index, &bvec, flags);