author	Sagi Grimberg <sagi@grimberg.me>	2020-03-13 02:06:39 +0300
committer	Keith Busch <kbusch@kernel.org>	2020-03-25 22:51:55 +0300
commit	e90d172b11b845b0f2caa9422c2f9d3ef59af575 (patch)
tree	327c73342c21e739bc7e2252c7882079aa5aa2a2 /drivers/nvme
parent	8d8a50e20dc2dc41cb788085968b9024dc36f7a5 (diff)
download	linux-e90d172b11b845b0f2caa9422c2f9d3ef59af575.tar.xz
nvmet-tcp: optimize tcp stack TX when data digest is used
If we have a 4-byte data digest to send to the wire, but we have more data to send, set MSG_MORE to tell the stack that more is coming.

Reviewed-by: Mark Wunderlich <mark.wunderlich@intel.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
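For context, a minimal userspace sketch of the same MSG_MORE idea, not the nvmet code itself: the send_digest() helper, the socket descriptor, and the queued_after count are made up for the example. The point is the same as in the hunk below: when another PDU is already queued behind a tiny 4-byte digest, hint the TCP stack with MSG_MORE so it can coalesce rather than push a runt segment. In the kernel path the flag is set in msg.msg_flags before kernel_sendmsg(); in userspace it is passed as the flags argument to sendmsg().

/*
 * Illustrative userspace sketch of the MSG_MORE hint; sock, digest and
 * queued_after are hypothetical and stand in for the nvmet queue state.
 */
#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Send a 4-byte digest; hint the stack if more data is queued behind it. */
ssize_t send_digest(int sock, uint32_t digest, size_t queued_after)
{
	struct iovec iov = {
		.iov_base = &digest,
		.iov_len  = sizeof(digest),
	};
	struct msghdr msg = {
		.msg_iov    = &iov,
		.msg_iovlen = 1,
	};
	int flags = MSG_DONTWAIT;

	/* More data follows: let the stack coalesce instead of pushing now. */
	if (queued_after > 0)
		flags |= MSG_MORE;

	return sendmsg(sock, &msg, flags);
}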
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/target/tcp.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 1942c941c31c..dcee4995e22d 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -626,7 +626,7 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 	return 1;
 }
 
-static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
@@ -636,6 +636,9 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
 	};
 	int ret;
 
+	if (!last_in_batch && cmd->queue->send_list_len)
+		msg.msg_flags |= MSG_MORE;
+
 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
 	if (unlikely(ret <= 0))
 		return ret;
@@ -676,7 +679,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 	}
 
 	if (cmd->state == NVMET_TCP_SEND_DDGST) {
-		ret = nvmet_try_send_ddgst(cmd);
+		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
 		if (ret <= 0)
 			goto done_send;
 	}