Diffstat (limited to 'drivers/nvme/target/tcp.c')
-rw-r--r--	drivers/nvme/target/tcp.c	62
1 file changed, 44 insertions(+), 18 deletions(-)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index dc1f0f647189..8b0485ada315 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length = cmd->pdu_len;
cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+ cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
sg = &cmd->req.sg[cmd->sg_idx];
@@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length -= iov_len;
sg = sg_next(sg);
iov++;
+ sg_offset = 0;
}
iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
	cmd->nr_mapped, cmd->pdu_len);
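
For context beyond the diff itself: the first hunk fixes how a receive that
resumes mid-buffer locates its scatterlist entry. DIV_ROUND_UP() rounds the
byte offset up to the next page boundary, so any offset that is not
page-aligned selects the sg entry one past the page that actually holds the
data; plain division selects the containing page. The added "sg_offset = 0"
makes the in-page offset apply only to the first mapped vector. A minimal
userspace sketch of just the index arithmetic (PAGE_SIZE and DIV_ROUND_UP
are redefined locally purely for illustration):

#include <stdio.h>

#define PAGE_SIZE	4096
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Receive resumed after 1.5 pages of data already arrived. */
	unsigned int offset = 6144;

	/* Old computation: rounds up past the page holding the data. */
	printf("DIV_ROUND_UP: sg_idx = %u\n",
	       DIV_ROUND_UP(offset, PAGE_SIZE));	/* prints 2 */

	/* Fixed computation: the page that contains the offset. */
	printf("offset / PAGE_SIZE: sg_idx = %u\n",
	       offset / PAGE_SIZE);			/* prints 1 */
	printf("sg_offset = %u\n", offset % PAGE_SIZE);	/* prints 2048 */
	return 0;
}
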
@@ -378,7 +379,7 @@ err:
return NVME_SC_INTERNAL;
}
-static void nvmet_tcp_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
struct nvmet_tcp_cmd *cmd)
{
ahash_request_set_crypt(hash, cmd->req.sg,
	(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
@@ -386,6 +387,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
crypto_ahash_digest(hash);
}
+static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
+ struct nvmet_tcp_cmd *cmd)
+{
+ struct scatterlist sg;
+ struct kvec *iov;
+ int i;
+
+ crypto_ahash_init(hash);
+ for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
+ sg_init_one(&sg, iov->iov_base, iov->iov_len);
+ ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
+ crypto_ahash_update(hash);
+ }
+ ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
+ crypto_ahash_final(hash);
+}
+
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
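
For context beyond the diff itself: nvmet_tcp_ddgst() is split into a send
and a receive variant. On the send side the payload is described by the
contiguous cmd->req.sg list, so a single crypto_ahash_digest() call covers
it. On receive, a command's data may arrive as multiple h2cdata PDUs, each
carrying its own data digest, so the new nvmet_tcp_recv_ddgst() feeds only
the currently mapped iovecs into the hash with crypto_ahash_update() and
finalizes per PDU. A hypothetical userspace analogue of the
chunked-equals-whole property, using zlib's crc32() merely as a stand-in
for the CRC32C that NVMe/TCP actually mandates (link with -lz):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	/* One logical payload that arrived as three separate chunks. */
	const char *chunks[] = { "nvme", "-tcp", " data digest" };
	char whole[64] = "";
	unsigned long inc = crc32(0L, Z_NULL, 0);
	unsigned long one;
	int i;

	for (i = 0; i < 3; i++) {
		/* Fold each chunk into the running digest, like
		 * crypto_ahash_update() over each mapped iovec. */
		inc = crc32(inc, (const unsigned char *)chunks[i],
			    strlen(chunks[i]));
		strcat(whole, chunks[i]);
	}

	/* Digesting the reassembled buffer in one pass must agree. */
	one = crc32(crc32(0L, Z_NULL, 0),
		    (const unsigned char *)whole, strlen(whole));
	printf("incremental=%08lx whole=%08lx\n", inc, one);
	return 0;
}
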
@@ -410,7 +428,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
if (queue->data_digest) {
pdu->hdr.flags |= NVME_TCP_F_DDGST;
- nvmet_tcp_ddgst(queue->snd_hash, cmd);
+ nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
}
if (cmd->queue->hdr_digest) {
@@ -1059,7 +1077,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
struct nvmet_tcp_queue *queue = cmd->queue;
- nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+ nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
queue->offset = 0;
queue->left = NVME_TCP_DIGEST_LENGTH;
queue->rcv_state = NVMET_TCP_RECV_DDGST;
@@ -1080,14 +1098,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
cmd->rbytes_done += ret;
}
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
+ }
nvmet_tcp_unmap_pdu_iovec(cmd);
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len) {
- if (queue->data_digest) {
- nvmet_tcp_prep_recv_ddgst(cmd);
- return 0;
- }
cmd->req.execute(&cmd->req);
}
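
For context beyond the diff itself: the hoisted digest check pairs with the
new receive-side digest routine. Because each h2cdata PDU carries its own
DDGST field when data digest is negotiated, the target must enter the
digest-receive state after every data PDU, not only once the whole command
transfer has completed, and it must do so while the per-PDU vectors in
cmd->iov are still mapped, since nvmet_tcp_recv_ddgst() walks exactly those
vectors before nvmet_tcp_unmap_pdu_iovec() tears them down.
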
@@ -1467,17 +1485,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
if (inet->rcv_tos > 0)
ip_sock_set_tos(sock->sk, inet->rcv_tos);
+ ret = 0;
write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = queue;
- queue->data_ready = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = nvmet_tcp_data_ready;
- queue->state_change = sock->sk->sk_state_change;
- sock->sk->sk_state_change = nvmet_tcp_state_change;
- queue->write_space = sock->sk->sk_write_space;
- sock->sk->sk_write_space = nvmet_tcp_write_space;
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ /*
+ * If the socket is already closing, don't even start
+ * consuming it
+ */
+ ret = -ENOTCONN;
+ } else {
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ }
write_unlock_bh(&sock->sk->sk_callback_lock);
- return 0;
+ return ret;
}
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1525,8 +1553,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_destroy_sq;
- queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
-
return 0;
out_destroy_sq:
mutex_lock(&nvmet_tcp_queue_mutex);
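
For context beyond the diff itself: the socket-setup hunks close a race in
queue establishment. If the peer drops the connection between accept and
this point, the closing state change has already been delivered to the
socket's original sk_state_change handler, so a queue whose callbacks were
installed afterwards would never be notified and never torn down. Checking
for TCP_ESTABLISHED under sk_callback_lock, which serializes against the
state-change path, lets setup fail cleanly with -ENOTCONN instead. Kicking
the initial io_work from inside the locked, established-only branch, rather
than later in nvmet_tcp_alloc_queue() as the final hunk removes, ensures
io_work only ever runs on a queue whose callbacks are actually live.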