author     tadeusz.struk@intel.com <tadeusz.struk@intel.com>  2015-04-01 23:53:06 +0300
committer  David S. Miller <davem@davemloft.net>  2015-04-02 05:59:28 +0300
commit     033f46b3c13d4072d8ee6b26dd1e90fdd06895d0 (patch)
tree       635f3c1f4d3e5f39da720d0e28e0171a0063a226 /crypto
parent     99949a74aa8f1b0b1befbd1afaa6959a3654cd72 (diff)
download   linux-033f46b3c13d4072d8ee6b26dd1e90fdd06895d0.tar.xz
crypto: algif - explicitly mark end of data
After the TX sgl is expanded we need to explicitly mark end of data
at the last buffer that contains data.

Changes in v2 - use type 'bool' and true/false for 'mark'.

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
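For context, a minimal sketch of the underlying issue follows. This is not the kernel's actual code; the helper name build_tx_sgl() and its parameters are hypothetical, and only sg_init_table(), sg_set_page() and sg_mark_end() are the real scatterlist API. It shows why the last populated entry needs an explicit end marker when fewer slots carry data than were allocated.

/*
 * Illustrative sketch only -- build_tx_sgl() and its parameters are
 * made up for this example; they do not exist in algif_skcipher.c.
 */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *build_tx_sgl(struct page **pages,
					unsigned int npages,
					unsigned int nalloc)
{
	struct scatterlist *sgl;
	unsigned int i;

	/* Allocate more slots than may end up carrying data. */
	sgl = kcalloc(nalloc, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	/* sg_init_table() terminates the table at slot nalloc - 1. */
	sg_init_table(sgl, nalloc);

	for (i = 0; i < npages; i++)
		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);

	/*
	 * If only npages slots hold data, a walker following the list
	 * would run past the payload into the empty slots, so the end
	 * is marked explicitly at the last buffer that contains data.
	 */
	if (npages && npages < nalloc)
		sg_mark_end(&sgl[npages - 1]);

	return sgl;
}

The patch below applies the same idea to the async TX path: after the TX table has been reallocated to a larger size, the end marker left by sg_init_table() no longer coincides with the last buffer that holds data.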
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/algif_skcipher.c  12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 2db1eb776932..0aa02635ceda 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -509,11 +509,11 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	struct skcipher_async_req *sreq;
 	struct ablkcipher_request *req;
 	struct skcipher_async_rsgl *last_rsgl = NULL;
-	unsigned int len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
 	unsigned int reqlen = sizeof(struct skcipher_async_req) +
 				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
-	int i = 0;
 	int err = -ENOMEM;
+	bool mark = false;
 
 	lock_sock(sk);
 	req = kmalloc(reqlen, GFP_KERNEL);
@@ -555,7 +555,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 			      iov_iter_count(&msg->msg_iter));
 		used = min_t(unsigned long, used, sg->length);
 
-		if (i == tx_nents) {
+		if (txbufs == tx_nents) {
 			struct scatterlist *tmp;
 			int x;
 			/* Ran out of tx slots in async request
@@ -573,10 +573,11 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 			kfree(sreq->tsg);
 			sreq->tsg = tmp;
 			tx_nents *= 2;
+			mark = true;
 		}
 		/* Need to take over the tx sgl from ctx
 		 * to the asynch req - these sgls will be freed later */
-		sg_set_page(sreq->tsg + i++, sg_page(sg), sg->length,
+		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
 			    sg->offset);
 
 		if (list_empty(&sreq->list)) {
@@ -604,6 +605,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 		iov_iter_advance(&msg->msg_iter, used);
 	}
 
+	if (mark)
+		sg_mark_end(sreq->tsg + txbufs - 1);
+
 	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
 				     len, sreq->iv);
 	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
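
The hunk at line 573 only shows the tail of the expansion path; the allocation and copy that precede it fall outside the diff context. A hedged sketch of that grow-then-copy pattern follows -- grow_sgl(), "old" and "old_nents" are illustrative names, not the driver's -- to show why the 'mark' flag is needed once the loop finishes.

/*
 * Illustrative sketch only; not the elided code from algif_skcipher.c.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *grow_sgl(struct scatterlist *old,
				    unsigned int old_nents)
{
	struct scatterlist *tmp;
	unsigned int i;

	tmp = kcalloc(old_nents * 2, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return NULL;

	/* The new table is terminated at slot 2 * old_nents - 1. */
	sg_init_table(tmp, old_nents * 2);

	/* Carry the already-filled entries over to the new table. */
	for (i = 0; i < old_nents; i++)
		sg_set_page(&tmp[i], sg_page(&old[i]), old[i].length,
			    old[i].offset);

	kfree(old);

	/*
	 * The end marker now sits past the real payload, which is why
	 * the caller must later sg_mark_end() the last entry it
	 * actually fills -- the job of the 'mark' flag in this patch.
	 */
	return tmp;
}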