Diffstat (limited to 'drivers/crypto/virtio')
-rw-r--r--  drivers/crypto/virtio/Kconfig                 |  10
-rw-r--r--  drivers/crypto/virtio/Makefile                |   5
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c    | 540
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h  | 128
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c    | 476
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_mgr.c     | 264
6 files changed, 1423 insertions(+), 0 deletions(-)
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
new file mode 100644
index 000000000000..d80f73366ae2
--- /dev/null
+++ b/drivers/crypto/virtio/Kconfig
@@ -0,0 +1,10 @@
+config CRYPTO_DEV_VIRTIO
+ tristate "VirtIO crypto driver"
+ depends on VIRTIO
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
+ default m
+ help
+ This driver provides support for the virtio crypto device. If you
+ choose 'M' here, this module will be called virtio_crypto.
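Since the entry defaults to 'm' whenever VIRTIO is set, most guest kernels pick the driver up automatically. As a sketch (the transport choice is an assumption; virtio-pci is the common case for QEMU guests), a minimal guest config fragment looks like:

    CONFIG_VIRTIO=y
    CONFIG_VIRTIO_PCI=y
    CONFIG_CRYPTO_DEV_VIRTIO=m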
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 000000000000..dd342c947ff9
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
+virtio_crypto-objs := \
+ virtio_crypto_algs.o \
+ virtio_crypto_mgr.o \
+ virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
new file mode 100644
index 000000000000..c2374df9abae
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -0,0 +1,540 @@
+/* Algorithms supported by virtio crypto device
+ *
+ * Authors: Gonglei <arei.gonglei@huawei.com>
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+/*
+ * The algs_lock protects the below global virtio_crypto_active_devs
+ * and the crypto algorithm registration.
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int virtio_crypto_active_devs;
+
+static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
+{
+ u64 total = 0;
+
+ for (; sg; sg = sg_next(sg))
+ total += sg->length;
+
+ return total;
+}
+
+static int
+virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
+{
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ pr_err("virtio_crypto: Unsupported key length: %d\n",
+ key_len);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_session(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ uint32_t alg, const uint8_t *key,
+ unsigned int keylen,
+ int encrypt)
+{
+ struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
+ unsigned int tmp;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+
+ /*
+ * To avoid doing DMA from the stack, use a
+ * dynamically-allocated buffer for the key.
+ */
+ uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);
+
+ if (!cipher_key)
+ return -ENOMEM;
+
+ memcpy(cipher_key, key, keylen);
+
+ spin_lock(&vcrypto->ctrl_lock);
+ /* Fill in the ctrl header */
+ vcrypto->ctrl.header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+ vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+ /* Set the default dataqueue id to 0 */
+ vcrypto->ctrl.header.queue_id = 0;
+
+ vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ /* Fill in the cipher parameters */
+ vcrypto->ctrl.u.sym_create_session.op_type =
+ cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
+ vcrypto->ctrl.header.algo;
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
+ cpu_to_le32(keylen);
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
+ cpu_to_le32(op);
+
+ sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sgs[num_out++] = &outhdr;
+
+ /* Set key */
+ sg_init_one(&key_sg, cipher_key, keylen);
+ sgs[num_out++] = &key_sg;
+
+ /* Return the status and session id */
+ sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+ sgs[num_out + num_in++] = &inhdr;
+
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+ num_in, vcrypto, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ kzfree(cipher_key);
+ return err;
+ }
+ virtqueue_kick(vcrypto->ctrl_vq);
+
+ /*
+ * The kick traps into the hypervisor, so the request should be
+ * handled immediately; spin until the buffer is returned.
+ */
+ while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+ !virtqueue_is_broken(vcrypto->ctrl_vq))
+ cpu_relax();
+
+ if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ pr_err("virtio_crypto: Create session failed status: %u\n",
+ le32_to_cpu(vcrypto->input.status));
+ kzfree(cipher_key);
+ return -EINVAL;
+ }
+
+ if (encrypt)
+ ctx->enc_sess_info.session_id =
+ le64_to_cpu(vcrypto->input.session_id);
+ else
+ ctx->dec_sess_info.session_id =
+ le64_to_cpu(vcrypto->input.session_id);
+
+ spin_unlock(&vcrypto->ctrl_lock);
+
+ kzfree(cipher_key);
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_close_session(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ int encrypt)
+{
+ struct scatterlist outhdr, status_sg, *sgs[2];
+ unsigned int tmp;
+ struct virtio_crypto_destroy_session_req *destroy_session;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+
+ spin_lock(&vcrypto->ctrl_lock);
+ vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+ /* Fill in the ctrl header */
+ vcrypto->ctrl.header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+ /* Set the default virtqueue id to 0 */
+ vcrypto->ctrl.header.queue_id = 0;
+
+ destroy_session = &vcrypto->ctrl.u.destroy_session;
+
+ if (encrypt)
+ destroy_session->session_id =
+ cpu_to_le64(ctx->enc_sess_info.session_id);
+ else
+ destroy_session->session_id =
+ cpu_to_le64(ctx->dec_sess_info.session_id);
+
+ sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sgs[num_out++] = &outhdr;
+
+ /* Return the status */
+ sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
+ sizeof(vcrypto->ctrl_status.status));
+ sgs[num_out + num_in++] = &status_sg;
+
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+ num_in, vcrypto, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ return err;
+ }
+ virtqueue_kick(vcrypto->ctrl_vq);
+
+ while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+ !virtqueue_is_broken(vcrypto->ctrl_vq))
+ cpu_relax();
+
+ if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+ vcrypto->ctrl_status.status,
+ le64_to_cpu(destroy_session->session_id));
+
+ return -EINVAL;
+ }
+ spin_unlock(&vcrypto->ctrl_lock);
+
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_sessions(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ const uint8_t *key, unsigned int keylen)
+{
+ uint32_t alg;
+ int ret;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+
+ if (keylen > vcrypto->max_cipher_key_len) {
+ pr_err("virtio_crypto: the key is too long\n");
+ goto bad_key;
+ }
+
+ if (virtio_crypto_alg_validate_key(keylen, &alg))
+ goto bad_key;
+
+ /* Create encryption session */
+ ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ alg, key, keylen, 1);
+ if (ret)
+ return ret;
+ /* Create decryption session */
+ ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ alg, key, keylen, 0);
+ if (ret) {
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ return ret;
+ }
+ return 0;
+
+bad_key:
+ crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+/* Note: kernel crypto API implementation */
+static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const uint8_t *key,
+ unsigned int keylen)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int ret;
+
+ if (!ctx->vcrypto) {
+ /* New key */
+ int node = virtio_crypto_get_current_node();
+ struct virtio_crypto *vcrypto =
+ virtcrypto_get_dev_node(node);
+ if (!vcrypto) {
+ pr_err("virtio_crypto: Could not find a virtio device in the system");
+ return -ENODEV;
+ }
+
+ ctx->vcrypto = vcrypto;
+ } else {
+ /* Rekeying: close the previously created sessions first */
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ }
+
+ ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+ if (ret) {
+ virtcrypto_dev_put(ctx->vcrypto);
+ ctx->vcrypto = NULL;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
+ struct ablkcipher_request *req,
+ struct data_queue *data_vq,
+ __u8 op)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ struct virtio_crypto_op_data_req *req_data;
+ int src_nents, dst_nents;
+ int err;
+ unsigned long flags;
+ struct scatterlist outhdr, iv_sg, status_sg, **sgs;
+ int i;
+ u64 dst_len;
+ unsigned int num_out = 0, num_in = 0;
+ int sg_total;
+ uint8_t *iv;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ dst_nents = sg_nents(req->dst);
+
+ pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
+ src_nents, dst_nents);
+
+ /* Why 3? outhdr + iv + inhdr */
+ sg_total = src_nents + dst_nents + 3;
+ sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!sgs)
+ return -ENOMEM;
+
+ req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!req_data) {
+ kfree(sgs);
+ return -ENOMEM;
+ }
+
+ vc_req->req_data = req_data;
+ vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ /* Head of operation */
+ if (op) {
+ req_data->header.session_id =
+ cpu_to_le64(ctx->enc_sess_info.session_id);
+ req_data->header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
+ } else {
+ req_data->header.session_id =
+ cpu_to_le64(ctx->dec_sess_info.session_id);
+ req_data->header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
+ }
+ req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ cpu_to_le32(req->nbytes);
+
+ dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
+ if (unlikely(dst_len > U32_MAX)) {
+ pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
+ err = -EINVAL;
+ goto free;
+ }
+
+ pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
+ req->nbytes, dst_len);
+
+ if (unlikely(req->nbytes + dst_len + ivsize +
+ sizeof(vc_req->status) > vcrypto->max_size)) {
+ pr_err("virtio_crypto: The length is too big\n");
+ err = -EINVAL;
+ goto free;
+ }
+
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ cpu_to_le32((uint32_t)dst_len);
+
+ /* Outhdr */
+ sg_init_one(&outhdr, req_data, sizeof(*req_data));
+ sgs[num_out++] = &outhdr;
+
+ /* IV */
+
+ /*
+ * To avoid doing DMA from the stack, use a
+ * dynamically-allocated buffer for the IV.
+ */
+ iv = kzalloc_node(ivsize, GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!iv) {
+ err = -ENOMEM;
+ goto free;
+ }
+ memcpy(iv, req->info, ivsize);
+ sg_init_one(&iv_sg, iv, ivsize);
+ sgs[num_out++] = &iv_sg;
+ vc_req->iv = iv;
+
+ /* Source data */
+ for (i = 0; i < src_nents; i++)
+ sgs[num_out++] = &req->src[i];
+
+ /* Destination data */
+ for (i = 0; i < dst_nents; i++)
+ sgs[num_out + num_in++] = &req->dst[i];
+
+ /* Status */
+ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
+ sgs[num_out + num_in++] = &status_sg;
+
+ vc_req->sgs = sgs;
+
+ spin_lock_irqsave(&data_vq->lock, flags);
+ err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
+ num_in, vc_req, GFP_ATOMIC);
+ virtqueue_kick(data_vq->vq);
+ spin_unlock_irqrestore(&data_vq->lock, flags);
+ if (unlikely(err < 0))
+ goto free_iv;
+
+ return 0;
+
+free_iv:
+ kzfree(iv);
+free:
+ kzfree(req_data);
+ kfree(sgs);
+ return err;
+}
+
+static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int ret;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ vc_req->ablkcipher_ctx = ctx;
+ vc_req->ablkcipher_req = req;
+ ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 1);
+ if (ret < 0) {
+ pr_err("virtio_crypto: Encryption failed!\n");
+ return ret;
+ }
+
+ return -EINPROGRESS;
+}
+
+static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int ret;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ vc_req->ablkcipher_ctx = ctx;
+ vc_req->ablkcipher_req = req;
+
+ ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 0);
+ if (ret < 0) {
+ pr_err("virtio_crypto: Decryption failed!\n");
+ return ret;
+ }
+
+ return -EINPROGRESS;
+}
+
+static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
+ ctx->tfm = tfm;
+
+ return 0;
+}
+
+static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!ctx->vcrypto)
+ return;
+
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtcrypto_dev_put(ctx->vcrypto);
+ ctx->vcrypto = NULL;
+}
+
+static struct crypto_alg virtio_crypto_algs[] = { {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "virtio_crypto_aes_cbc",
+ .cra_priority = 501,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = virtio_crypto_ablkcipher_init,
+ .cra_exit = virtio_crypto_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = virtio_crypto_ablkcipher_setkey,
+ .decrypt = virtio_crypto_ablkcipher_decrypt,
+ .encrypt = virtio_crypto_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ },
+} };
+
+int virtio_crypto_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++virtio_crypto_active_devs != 1)
+ goto unlock;
+
+ ret = crypto_register_algs(virtio_crypto_algs,
+ ARRAY_SIZE(virtio_crypto_algs));
+ if (ret)
+ virtio_crypto_active_devs--;
+
+unlock:
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void virtio_crypto_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--virtio_crypto_active_devs != 0)
+ goto unlock;
+
+ crypto_unregister_algs(virtio_crypto_algs,
+ ARRAY_SIZE(virtio_crypto_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
+}
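Once registered, the "cbc(aes)" implementation above is reached through the regular kernel crypto API; nothing is virtio-specific from the caller's side. A minimal sketch of an in-kernel user against the ablkcipher interface of this era (my_complete, key, iv, src_buf, dst_buf and len are hypothetical; error handling trimmed):

    struct crypto_ablkcipher *tfm;
    struct ablkcipher_request *req;
    struct scatterlist sg_src, sg_dst;

    /* cra_priority 501 lets this win the cbc(aes) lookup */
    tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
    /* triggers the two CREATE_SESSION control requests above */
    crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);

    req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
    ablkcipher_request_set_callback(req, 0, my_complete, NULL);
    sg_init_one(&sg_src, src_buf, len);
    sg_init_one(&sg_dst, dst_buf, len);
    ablkcipher_request_set_crypt(req, &sg_src, &sg_dst, len, iv);

    /* returns -EINPROGRESS; my_complete() runs from the dataq callback */
    crypto_ablkcipher_encrypt(req);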
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
new file mode 100644
index 000000000000..3d6566b02876
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -0,0 +1,128 @@
+/* Common header for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _VIRTIO_CRYPTO_COMMON_H
+#define _VIRTIO_CRYPTO_COMMON_H
+
+#include <linux/virtio.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+
+
+/* Internal representation of a data virtqueue */
+struct data_queue {
+ /* Virtqueue associated with this data queue */
+ struct virtqueue *vq;
+
+ /* To protect the vq operations for the dataq */
+ spinlock_t lock;
+
+ /* Name of the data queue: dataq.$index */
+ char name[32];
+};
+
+struct virtio_crypto {
+ struct virtio_device *vdev;
+ struct virtqueue *ctrl_vq;
+ struct data_queue *data_vq;
+
+ /* To protect the vq operations for the controlq */
+ spinlock_t ctrl_lock;
+
+ /* Maximum number of data queues supported by the device */
+ u32 max_data_queues;
+
+ /* Number of data queues currently used by the driver */
+ u32 curr_queue;
+
+ /* Maximum length of cipher key */
+ u32 max_cipher_key_len;
+ /* Maximum length of authentication key */
+ u32 max_auth_key_len;
+ /* Maximum size of a single request */
+ u64 max_size;
+
+ /* Control VQ buffers: protected by the ctrl_lock */
+ struct virtio_crypto_op_ctrl_req ctrl;
+ struct virtio_crypto_session_input input;
+ struct virtio_crypto_inhdr ctrl_status;
+
+ unsigned long status;
+ atomic_t ref_count;
+ struct list_head list;
+ struct module *owner;
+ uint8_t dev_id;
+
+ /* Is the affinity hint set for the virtqueues? */
+ bool affinity_hint_set;
+};
+
+struct virtio_crypto_sym_session_info {
+ /* Backend session id, which comes from the host side */
+ __u64 session_id;
+};
+
+struct virtio_crypto_ablkcipher_ctx {
+ struct virtio_crypto *vcrypto;
+ struct crypto_tfm *tfm;
+
+ struct virtio_crypto_sym_session_info enc_sess_info;
+ struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_request {
+ /* Cipher or aead */
+ uint32_t type;
+ uint8_t status;
+ struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
+ struct ablkcipher_request *ablkcipher_req;
+ struct virtio_crypto_op_data_req *req_data;
+ struct scatterlist **sgs;
+ uint8_t *iv;
+};
+
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
+struct list_head *virtcrypto_devmgr_get_head(void);
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_devmgr_get_first(void);
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_get_dev_node(int node);
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
+
+static inline int virtio_crypto_get_current_node(void)
+{
+ int cpu, node;
+
+ cpu = get_cpu();
+ node = topology_physical_package_id(cpu);
+ put_cpu();
+
+ return node;
+}
+
+int virtio_crypto_algs_register(void);
+void virtio_crypto_algs_unregister(void);
+
+#endif /* _VIRTIO_CRYPTO_COMMON_H */
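The devmgr declarations above follow a get/put discipline around ref_count. A sketch of the intended pairing, mirroring what virtio_crypto_ablkcipher_setkey() and the tfm exit path do (not an additional API):

    int node = virtio_crypto_get_current_node();
    struct virtio_crypto *vcrypto = virtcrypto_get_dev_node(node);

    if (!vcrypto)   /* no started device reachable from this node */
            return -ENODEV;

    /* ... submit requests via vcrypto->data_vq[].vq / ctrl_vq ... */

    virtcrypto_dev_put(vcrypto);    /* the last put also drops the module ref */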
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
new file mode 100644
index 000000000000..fe70ec823b27
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -0,0 +1,476 @@
+/* Driver for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/virtio_config.h>
+#include <linux/cpu.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+static void
+virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
+{
+ if (vc_req) {
+ kzfree(vc_req->iv);
+ kzfree(vc_req->req_data);
+ kfree(vc_req->sgs);
+ }
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct virtio_crypto_request *vc_req;
+ unsigned long flags;
+ unsigned int len;
+ struct ablkcipher_request *ablk_req;
+ int error;
+ unsigned int qid = vq->index;
+
+ spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ switch (vc_req->status) {
+ case VIRTIO_CRYPTO_OK:
+ error = 0;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ case VIRTIO_CRYPTO_ERR:
+ error = -EINVAL;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ error = -EBADMSG;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+ ablk_req = vc_req->ablkcipher_req;
+ virtcrypto_clear_request(vc_req);
+
+ spin_unlock_irqrestore(
+ &vcrypto->data_vq[qid].lock, flags);
+ /* Finish the encrypt or decrypt process */
+ ablk_req->base.complete(&ablk_req->base, error);
+ spin_lock_irqsave(
+ &vcrypto->data_vq[qid].lock, flags);
+ }
+ }
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
+
+static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+{
+ vq_callback_t **callbacks;
+ struct virtqueue **vqs;
+ int ret = -ENOMEM;
+ int i, total_vqs;
+ const char **names;
+
+ /*
+ * We expect 1 data virtqueue, possibly followed by
+ * N-1 further data queues used in multiqueue mode,
+ * followed by the control vq.
+ */
+ total_vqs = vi->max_data_queues + 1;
+
+ /* Allocate space for find_vqs parameters */
+ vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_vq;
+ callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
+ if (!callbacks)
+ goto err_callback;
+ names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ goto err_names;
+
+ /* Parameters for control virtqueue */
+ callbacks[total_vqs - 1] = NULL;
+ names[total_vqs - 1] = "controlq";
+
+ /* Allocate/initialize parameters for data virtqueues */
+ for (i = 0; i < vi->max_data_queues; i++) {
+ callbacks[i] = virtcrypto_dataq_callback;
+ snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
+ "dataq.%d", i);
+ names[i] = vi->data_vq[i].name;
+ }
+
+ ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
+ names);
+ if (ret)
+ goto err_find;
+
+ vi->ctrl_vq = vqs[total_vqs - 1];
+
+ for (i = 0; i < vi->max_data_queues; i++) {
+ spin_lock_init(&vi->data_vq[i].lock);
+ vi->data_vq[i].vq = vqs[i];
+ }
+
+ kfree(names);
+ kfree(callbacks);
+ kfree(vqs);
+
+ return 0;
+
+err_find:
+ kfree(names);
+err_names:
+ kfree(callbacks);
+err_callback:
+ kfree(vqs);
+err_vq:
+ return ret;
+}
+
+static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
+{
+ vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
+ GFP_KERNEL);
+ if (!vi->data_vq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
+{
+ int i;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_data_queues; i++)
+ virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+
+ vi->affinity_hint_set = false;
+ }
+}
+
+static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
+{
+ int i = 0;
+ int cpu;
+
+ /*
+ * In single queue mode, we don't set the cpu affinity.
+ */
+ if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
+ virtcrypto_clean_affinity(vcrypto, -1);
+ return;
+ }
+
+ /*
+ * In multiqueue mode, we let each queue be private to one cpu
+ * by setting the affinity hint, eliminating the contention.
+ *
+ * TODO: add cpu hotplug support by registering a cpu notifier.
+ *
+ */
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+ if (++i >= vcrypto->max_data_queues)
+ break;
+ }
+
+ vcrypto->affinity_hint_set = true;
+}
+
+static void virtcrypto_free_queues(struct virtio_crypto *vi)
+{
+ kfree(vi->data_vq);
+}
+
+static int virtcrypto_init_vqs(struct virtio_crypto *vi)
+{
+ int ret;
+
+ /* Allocate the data queues */
+ ret = virtcrypto_alloc_queues(vi);
+ if (ret)
+ goto err;
+
+ ret = virtcrypto_find_vqs(vi);
+ if (ret)
+ goto err_free;
+
+ get_online_cpus();
+ virtcrypto_set_affinity(vi);
+ put_online_cpus();
+
+ return 0;
+
+err_free:
+ virtcrypto_free_queues(vi);
+err:
+ return ret;
+}
+
+static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
+{
+ u32 status;
+ int err;
+
+ virtio_cread(vcrypto->vdev,
+ struct virtio_crypto_config, status, &status);
+
+ /*
+ * Unknown status bits would be a host error and the driver
+ * should consider the device to be broken.
+ */
+ if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
+ dev_warn(&vcrypto->vdev->dev,
+ "Unknown status bits: 0x%x\n", status);
+
+ virtio_break_device(vcrypto->vdev);
+ return -EPERM;
+ }
+
+ if (vcrypto->status == status)
+ return 0;
+
+ vcrypto->status = status;
+
+ if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
+ err = virtcrypto_dev_start(vcrypto);
+ if (err) {
+ dev_err(&vcrypto->vdev->dev,
+ "Failed to start virtio crypto device.\n");
+
+ return -EPERM;
+ }
+ dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
+ } else {
+ virtcrypto_dev_stop(vcrypto);
+ dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
+ }
+
+ return 0;
+}
+
+static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
+{
+ struct virtio_device *vdev = vcrypto->vdev;
+
+ virtcrypto_clean_affinity(vcrypto, -1);
+
+ vdev->config->del_vqs(vdev);
+
+ virtcrypto_free_queues(vcrypto);
+}
+
+static int virtcrypto_probe(struct virtio_device *vdev)
+{
+ int err = -EFAULT;
+ struct virtio_crypto *vcrypto;
+ u32 max_data_queues = 0, max_cipher_key_len = 0;
+ u32 max_auth_key_len = 0;
+ u64 max_size = 0;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow.
+ */
+ dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
+ dev_to_node(&vdev->dev));
+ if (!vcrypto)
+ return -ENOMEM;
+
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_dataqueues, &max_data_queues);
+ if (max_data_queues < 1)
+ max_data_queues = 1;
+
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_cipher_key_len, &max_cipher_key_len);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_auth_key_len, &max_auth_key_len);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_size, &max_size);
+
+ /* Add virtio crypto device to global table */
+ err = virtcrypto_devmgr_add_dev(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
+ goto free;
+ }
+ vcrypto->owner = THIS_MODULE;
+ vdev->priv = vcrypto;
+ vcrypto->vdev = vdev;
+
+ spin_lock_init(&vcrypto->ctrl_lock);
+
+ /* Use single data queue as default */
+ vcrypto->curr_queue = 1;
+ vcrypto->max_data_queues = max_data_queues;
+ vcrypto->max_cipher_key_len = max_cipher_key_len;
+ vcrypto->max_auth_key_len = max_auth_key_len;
+ vcrypto->max_size = max_size;
+
+ dev_info(&vdev->dev,
+ "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
+ vcrypto->max_data_queues,
+ vcrypto->max_cipher_key_len,
+ vcrypto->max_auth_key_len,
+ vcrypto->max_size);
+
+ err = virtcrypto_init_vqs(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to initialize vqs.\n");
+ goto free_dev;
+ }
+ virtio_device_ready(vdev);
+
+ err = virtcrypto_update_status(vcrypto);
+ if (err)
+ goto free_vqs;
+
+ return 0;
+
+free_vqs:
+ vcrypto->vdev->config->reset(vdev);
+ virtcrypto_del_vqs(vcrypto);
+free_dev:
+ virtcrypto_devmgr_rm_dev(vcrypto);
+free:
+ kfree(vcrypto);
+ return err;
+}
+
+static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+{
+ struct virtio_crypto_request *vc_req;
+ int i;
+ struct virtqueue *vq;
+
+ for (i = 0; i < vcrypto->max_data_queues; i++) {
+ vq = vcrypto->data_vq[i].vq;
+ while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
+ kfree(vc_req->req_data);
+ kfree(vc_req->sgs);
+ }
+ }
+}
+
+static void virtcrypto_remove(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ virtcrypto_del_vqs(vcrypto);
+ virtcrypto_devmgr_rm_dev(vcrypto);
+ kfree(vcrypto);
+}
+
+static void virtcrypto_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ virtcrypto_update_status(vcrypto);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtcrypto_freeze(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+
+ virtcrypto_del_vqs(vcrypto);
+ return 0;
+}
+
+static int virtcrypto_restore(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+ int err;
+
+ err = virtcrypto_init_vqs(vcrypto);
+ if (err)
+ return err;
+
+ virtio_device_ready(vdev);
+ err = virtcrypto_dev_start(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#endif
+
+static unsigned int features[] = {
+ /* none */
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_crypto_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtcrypto_probe,
+ .remove = virtcrypto_remove,
+ .config_changed = virtcrypto_config_changed,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtcrypto_freeze,
+ .restore = virtcrypto_restore,
+#endif
+};
+
+module_virtio_driver(virtio_crypto_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("virtio crypto device driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
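For reference, the virtqueue layout set up in virtcrypto_find_vqs() puts all data queues first and the control queue last. With a device advertising max_dataqueues = 4, the indices work out as:

    vqs[0] .. vqs[3]  ->  dataq.0 .. dataq.3  (virtcrypto_dataq_callback)
    vqs[4]            ->  controlq            (no callback; completions polled under ctrl_lock)

Only dataq.0 is used for now, since curr_queue defaults to 1; virtcrypto_set_affinity() is already prepared to spread the queues across cpus once multiqueue is used.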
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
new file mode 100644
index 000000000000..a69ff71de2c4
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -0,0 +1,264 @@
+/* Management for virtio crypto devices (refer to adf_dev_mgr.c)
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+static LIST_HEAD(virtio_crypto_table);
+static uint32_t num_devices;
+
+/* The table_lock protects the above global list and num_devices */
+static DEFINE_MUTEX(table_lock);
+
+#define VIRTIO_CRYPTO_MAX_DEVICES 32
+
+
+/*
+ * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
+ * framework.
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Function adds virtio crypto device to the global list.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
+{
+ struct list_head *itr;
+
+ mutex_lock(&table_lock);
+ if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
+ pr_info("virtio_crypto: only support up to %d devices\n",
+ VIRTIO_CRYPTO_MAX_DEVICES);
+ mutex_unlock(&table_lock);
+ return -EFAULT;
+ }
+
+ list_for_each(itr, &virtio_crypto_table) {
+ struct virtio_crypto *ptr =
+ list_entry(itr, struct virtio_crypto, list);
+
+ if (ptr == vcrypto_dev) {
+ mutex_unlock(&table_lock);
+ return -EEXIST;
+ }
+ }
+ atomic_set(&vcrypto_dev->ref_count, 0);
+ list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
+ vcrypto_dev->dev_id = num_devices++;
+ mutex_unlock(&table_lock);
+ return 0;
+}
+
+struct list_head *virtcrypto_devmgr_get_head(void)
+{
+ return &virtio_crypto_table;
+}
+
+/*
+ * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
+ * framework.
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Function removes virtio crypto device from the acceleration framework.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
+{
+ mutex_lock(&table_lock);
+ list_del(&vcrypto_dev->list);
+ num_devices--;
+ mutex_unlock(&table_lock);
+}
+
+/*
+ * virtcrypto_devmgr_get_first()
+ *
+ * Function returns the first virtio crypto device from the acceleration
+ * framework.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_devmgr_get_first(void)
+{
+ struct virtio_crypto *dev = NULL;
+
+ mutex_lock(&table_lock);
+ if (!list_empty(&virtio_crypto_table))
+ dev = list_first_entry(&virtio_crypto_table,
+ struct virtio_crypto,
+ list);
+ mutex_unlock(&table_lock);
+ return dev;
+}
+
+/*
+ * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
+{
+ return atomic_read(&vcrypto_dev->ref_count) != 0;
+}
+
+/*
+ * virtcrypto_dev_get() - Increment vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Increment the vcrypto_dev refcount; if this takes the device
+ * from unused to in use, increment the refcount of the owning
+ * module too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when failing to bump the module refcount.
+ */
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
+{
+ if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
+ if (!try_module_get(vcrypto_dev->owner))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Decrement the vcrypto_dev refcount; if this drops the last
+ * reference to the device, decrement the refcount of the owning
+ * module too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
+{
+ if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
+ module_put(vcrypto_dev->owner);
+}
+
+/*
+ * virtcrypto_dev_started() - Check whether device has started
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
+{
+ return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
+}
+
+/*
+ * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
+ * @node: Node id the driver works.
+ *
+ * Function returns the least-used virtio crypto device on the given node.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_get_dev_node(int node)
+{
+ struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
+ unsigned long best = ~0;
+ unsigned long ctr;
+
+ mutex_lock(&table_lock);
+ list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
+
+ if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
+ dev_to_node(&tmp_dev->vdev->dev) < 0) &&
+ virtcrypto_dev_started(tmp_dev)) {
+ ctr = atomic_read(&tmp_dev->ref_count);
+ if (best > ctr) {
+ vcrypto_dev = tmp_dev;
+ best = ctr;
+ }
+ }
+ }
+
+ if (!vcrypto_dev) {
+ pr_info("virtio_crypto: Could not find a device on node %d\n",
+ node);
+ /* Get any started device */
+ list_for_each_entry(tmp_dev,
+ virtcrypto_devmgr_get_head(), list) {
+ if (virtcrypto_dev_started(tmp_dev)) {
+ vcrypto_dev = tmp_dev;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&table_lock);
+ if (!vcrypto_dev)
+ return NULL;
+
+ virtcrypto_dev_get(vcrypto_dev);
+ return vcrypto_dev;
+}
+
+/*
+ * virtcrypto_dev_start() - Start virtio crypto device
+ * @vcrypto: Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is ready to be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, -EFAULT when failing to register algorithms.
+ */
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
+{
+ if (virtio_crypto_algs_register()) {
+ pr_err("virtio_crypto: Failed to register crypto algs\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * virtcrypto_dev_stop() - Stop virtio crypto device
+ * @vcrypto: Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is going away and can no longer be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
+{
+ virtio_crypto_algs_unregister();
+}
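A worked example of the selection in virtcrypto_get_dev_node(): suppose three started devices,

    dev_id  node  ref_count
    0       0     2
    1       0     0
    2       1     0

A caller on node 0 gets dev 1 (same node, lowest ref_count) and a caller on node 1 gets dev 2; if no started device matched the caller's node, the first started device in the global list is returned as a fallback. The winner's ref_count is bumped via virtcrypto_dev_get() before the pointer is handed back, so callers must balance it with virtcrypto_dev_put().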