author     Linus Torvalds <torvalds@linux-foundation.org>  2024-01-19 02:57:25 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-01-19 02:57:25 +0300
commit     244aefb1c64ad562b48929e6d85e07bc79e331d6 (patch)
tree       d98bb8e0c9533278f8ee4341a9dd8752eedcbd15 /drivers/virtio
parent     86c4d58a99ab1ccfa03860d4dead157be51eb2b6 (diff)
parent     78f70c02bdbccb5e9b0b0c728185d4aeb7044ace (diff)
download   linux-244aefb1c64ad562b48929e6d85e07bc79e331d6.tar.xz
Merge tag 'vfio-v6.8-rc1' of https://github.com/awilliam/linux-vfio
Pull VFIO updates from Alex Williamson:

 - Add debugfs support, initially used for reporting device migration
   state (Longfang Liu)

 - Fixes and support for migration dirty tracking across multiple IOVA
   regions in the pds-vfio-pci driver (Brett Creeley)

 - Improved IOMMU allocation accounting visibility (Pasha Tatashin)

 - Virtio infrastructure and a new virtio-vfio-pci variant driver, which
   provides emulation of a legacy virtio interface on modern virtio
   hardware for virtio-net VF devices where the PF driver exposes
   support for legacy admin queues, i.e. an emulated IO BAR on an SR-IOV
   VF to provide driver ABI compatibility to legacy devices (Yishai
   Hadas & Feng Liu)

 - Migration fixes for the hisi-acc-vfio-pci variant driver (Shameer
   Kolothum)

 - Kconfig dependency fix for the new virtio-vfio-pci variant driver
   (Arnd Bergmann)

* tag 'vfio-v6.8-rc1' of https://github.com/awilliam/linux-vfio: (22 commits)
  vfio/virtio: fix virtio-pci dependency
  hisi_acc_vfio_pci: Update migration data pointer correctly on saving/resume
  vfio/virtio: Declare virtiovf_pci_aer_reset_done() static
  vfio/virtio: Introduce a vfio driver over virtio devices
  vfio/pci: Expose vfio_pci_core_iowrite/read##size()
  vfio/pci: Expose vfio_pci_core_setup_barmap()
  virtio-pci: Introduce APIs to execute legacy IO admin commands
  virtio-pci: Initialize the supported admin commands
  virtio-pci: Introduce admin commands
  virtio-pci: Introduce admin command sending function
  virtio-pci: Introduce admin virtqueue
  virtio: Define feature bit for administration virtqueue
  vfio/type1: account iommu allocations
  vfio/pds: Add multi-region support
  vfio/pds: Move seq/ack bitmaps into region struct
  vfio/pds: Pass region info to relevant functions
  vfio/pds: Move and rename region specific info
  vfio/pds: Only use a single SGL for both seq and ack
  vfio/pds: Fix calculations in pds_vfio_dirty_sync
  MAINTAINERS: Add vfio debugfs interface doc link
  ...
Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/Kconfig                          5
-rw-r--r--  drivers/virtio/Makefile                         1
-rw-r--r--  drivers/virtio/virtio.c                        37
-rw-r--r--  drivers/virtio/virtio_pci_admin_legacy_io.c   244
-rw-r--r--  drivers/virtio/virtio_pci_common.c             14
-rw-r--r--  drivers/virtio/virtio_pci_common.h             42
-rw-r--r--  drivers/virtio/virtio_pci_modern.c            259
-rw-r--r--  drivers/virtio/virtio_pci_modern_dev.c         24
8 files changed, 618 insertions, 8 deletions
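
For context, the sketch below shows how a consumer such as a VFIO variant driver might use the legacy IO admin API that this merge exports from drivers/virtio/virtio_pci_admin_legacy_io.c. It is a hypothetical fragment, not code from this merge: the example_read_legacy_status() helper and the reuse of VIRTIO_PCI_STATUS as the legacy common-config offset are illustrative assumptions, while <linux/virtio_pci_admin.h> and the virtio_pci_admin_*() calls are the interfaces actually added in the diff below.

/*
 * Hypothetical consumer of the legacy IO admin API added below
 * (illustrative only, not part of this merge). A VF's legacy registers
 * are accessed on its behalf through the PF's admin virtqueue.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/virtio_pci.h>		/* VIRTIO_PCI_STATUS: legacy device status offset (assumption for illustration) */
#include <linux/virtio_pci_admin.h>

static int example_read_legacy_status(struct pci_dev *vf_pdev, u8 *status)
{
	/* The PF must offer the admin VQ and the full legacy IO command set. */
	if (!virtio_pci_admin_has_legacy_io(vf_pdev))
		return -EOPNOTSUPP;

	/* Read one byte of the VF's legacy common config: the device status field. */
	return virtio_pci_admin_legacy_common_io_read(vf_pdev, VIRTIO_PCI_STATUS,
						      sizeof(*status), status);
}

Writes follow the same pattern through virtio_pci_admin_legacy_common_io_write() and virtio_pci_admin_legacy_device_io_write(), and virtio_pci_admin_legacy_io_notify_info() reports which BAR and offset a legacy driver should use for queue notification.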
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 0a53a61231c2..c17193544268 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -60,6 +60,11 @@ config VIRTIO_PCI
If unsure, say M.
+config VIRTIO_PCI_ADMIN_LEGACY
+ bool
+ depends on VIRTIO_PCI && (X86 || COMPILE_TEST)
+ default y
+
config VIRTIO_PCI_LEGACY
bool "Support for legacy virtio draft 0.9.X and older devices"
default y
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 8e98d24917cc..73ace62af440 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
+virtio_pci-$(CONFIG_VIRTIO_PCI_ADMIN_LEGACY) += virtio_pci_admin_legacy_io.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 3893dc29eb26..f4080692b351 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -302,9 +302,15 @@ static int virtio_dev_probe(struct device *_d)
if (err)
goto err;
+ if (dev->config->create_avq) {
+ err = dev->config->create_avq(dev);
+ if (err)
+ goto err;
+ }
+
err = drv->probe(dev);
if (err)
- goto err;
+ goto err_probe;
/* If probe didn't do it, mark device DRIVER_OK ourselves. */
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -316,6 +322,10 @@ static int virtio_dev_probe(struct device *_d)
virtio_config_enable(dev);
return 0;
+
+err_probe:
+ if (dev->config->destroy_avq)
+ dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -331,6 +341,9 @@ static void virtio_dev_remove(struct device *_d)
drv->remove(dev);
+ if (dev->config->destroy_avq)
+ dev->config->destroy_avq(dev);
+
/* Driver should have reset device. */
WARN_ON_ONCE(dev->config->get_status(dev));
@@ -489,13 +502,20 @@ EXPORT_SYMBOL_GPL(unregister_virtio_device);
int virtio_device_freeze(struct virtio_device *dev)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
virtio_config_disable(dev);
dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
- if (drv && drv->freeze)
- return drv->freeze(dev);
+ if (drv && drv->freeze) {
+ ret = drv->freeze(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->config->destroy_avq)
+ dev->config->destroy_avq(dev);
return 0;
}
@@ -532,10 +552,16 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
+ if (dev->config->create_avq) {
+ ret = dev->config->create_avq(dev);
+ if (ret)
+ goto err;
+ }
+
if (drv->restore) {
ret = drv->restore(dev);
if (ret)
- goto err;
+ goto err_restore;
}
/* If restore didn't do it, mark device DRIVER_OK ourselves. */
@@ -546,6 +572,9 @@ int virtio_device_restore(struct virtio_device *dev)
return 0;
+err_restore:
+ if (dev->config->destroy_avq)
+ dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
diff --git a/drivers/virtio/virtio_pci_admin_legacy_io.c b/drivers/virtio/virtio_pci_admin_legacy_io.c
new file mode 100644
index 000000000000..819cfbbc67c3
--- /dev/null
+++ b/drivers/virtio/virtio_pci_admin_legacy_io.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/virtio_pci_admin.h>
+#include "virtio_pci_common.h"
+
+/*
+ * virtio_pci_admin_has_legacy_io - Checks whether the legacy IO
+ * commands are supported
+ * @pdev: VF pci_dev
+ *
+ * Returns true if all legacy IO admin commands are supported.
+ */
+bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_pci_device *vp_dev;
+
+ if (!virtio_dev)
+ return false;
+
+ if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ vp_dev = to_vp_device(virtio_dev);
+
+ if ((vp_dev->admin_vq.supported_cmds & VIRTIO_LEGACY_ADMIN_CMD_BITMAP) ==
+ VIRTIO_LEGACY_ADMIN_CMD_BITMAP)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_has_legacy_io);
+
+static int virtio_pci_admin_legacy_io_write(struct pci_dev *pdev, u16 opcode,
+ u8 offset, u8 size, u8 *buf)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_legacy_wr_data *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data) + size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->offset = offset;
+ memcpy(data->registers, buf, size);
+ sg_init_one(&data_sg, data, sizeof(*data) + size);
+ cmd.opcode = cpu_to_le16(opcode);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+ return ret;
+}
+
+/*
+ * virtio_pci_admin_legacy_common_io_write - Write legacy common configuration
+ * of a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the common configuration area to write to
+ * @size: size of the data to write
+ * @buf: buffer which holds the data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_write(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_write);
+
+/*
+ * virtio_pci_admin_legacy_device_io_write - Write legacy device configuration
+ * of a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the device configuration area to write to
+ * @size: size of the data to write
+ * @buf: buffer which holds the data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_write(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_write);
+
+static int virtio_pci_admin_legacy_io_read(struct pci_dev *pdev, u16 opcode,
+ u8 offset, u8 size, u8 *buf)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_legacy_rd_data *data;
+ struct scatterlist data_sg, result_sg;
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->offset = offset;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ sg_init_one(&result_sg, buf, size);
+ cmd.opcode = cpu_to_le16(opcode);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+ return ret;
+}
+
+/*
+ * virtio_pci_admin_legacy_device_io_read - Read legacy device configuration of
+ * a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the device configuration area to read from
+ * @size: size of the data to be read
+ * @buf: buffer to hold the returned data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_read(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_read);
+
+/*
+ * virtio_pci_admin_legacy_common_io_read - Read legacy common configuration of
+ * a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the common configuration area to read from
+ * @size: size of the data to be read
+ * @buf: buffer to hold the returned data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_read(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_read);
+
+/*
+ * virtio_pci_admin_legacy_io_notify_info - Read the queue notification
+ * information for legacy interface
+ * @pdev: VF pci_dev
+ * @req_bar_flags: requested bar flags
+ * @bar: on output the BAR number of the owner or member device
+ * @bar_offset: on output the offset within bar
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
+ u8 req_bar_flags, u8 *bar,
+ u64 *bar_offset)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_notify_info_result *result;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ sg_init_one(&result_sg, result, sizeof(*result));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret) {
+ struct virtio_admin_cmd_notify_info_data *entry;
+ int i;
+
+ ret = -ENOENT;
+ for (i = 0; i < VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO; i++) {
+ entry = &result->entries[i];
+ if (entry->flags == VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END)
+ break;
+ if (entry->flags != req_bar_flags)
+ continue;
+ *bar = entry->bar;
+ *bar_offset = le64_to_cpu(entry->offset);
+ ret = 0;
+ break;
+ }
+ }
+
+ kfree(result);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_io_notify_info);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 7a5593997e0e..9f93330fc2cc 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -236,6 +236,9 @@ void vp_del_vqs(struct virtio_device *vdev)
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ if (vp_dev->is_avq(vdev, vq->index))
+ continue;
+
if (vp_dev->per_vq_vectors) {
int v = vp_dev->vqs[vq->index]->msix_vector;
@@ -642,6 +645,17 @@ static struct pci_driver virtio_pci_driver = {
.sriov_configure = virtio_pci_sriov_configure,
};
+struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
+{
+ struct virtio_pci_device *pf_vp_dev;
+
+ pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
+ if (IS_ERR(pf_vp_dev))
+ return NULL;
+
+ return &pf_vp_dev->vdev;
+}
+
module_pci_driver(virtio_pci_driver);
MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 4b773bd7c58c..7fef52bee455 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -29,6 +29,7 @@
#include <linux/virtio_pci_modern.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
struct virtio_pci_vq_info {
/* the actual virtqueue */
@@ -41,6 +42,17 @@ struct virtio_pci_vq_info {
unsigned int msix_vector;
};
+struct virtio_pci_admin_vq {
+ /* Virtqueue info associated with this admin queue. */
+ struct virtio_pci_vq_info info;
+ /* serializing admin commands execution and virtqueue deletion */
+ struct mutex cmd_lock;
+ u64 supported_cmds;
+ /* Name of the admin queue: avq.$vq_index. */
+ char name[10];
+ u16 vq_index;
+};
+
/* Our device structure */
struct virtio_pci_device {
struct virtio_device vdev;
@@ -58,9 +70,13 @@ struct virtio_pci_device {
spinlock_t lock;
struct list_head virtqueues;
- /* array of all queues for house-keeping */
+ /* Array of all virtqueues reported in the
+ * PCI common config num_queues field
+ */
struct virtio_pci_vq_info **vqs;
+ struct virtio_pci_admin_vq admin_vq;
+
/* MSI-X support */
int msix_enabled;
int intx_enabled;
@@ -86,6 +102,7 @@ struct virtio_pci_device {
void (*del_vq)(struct virtio_pci_vq_info *info);
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
+ bool (*is_avq)(struct virtio_device *vdev, unsigned int index);
};
/* Constants for MSI-X */
@@ -139,4 +156,27 @@ static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
int virtio_pci_modern_probe(struct virtio_pci_device *);
void virtio_pci_modern_remove(struct virtio_pci_device *);
+struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
+
+#define VIRTIO_LEGACY_ADMIN_CMD_BITMAP \
+ (BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO))
+
+/* Unlike modern drivers which support hardware virtio devices, legacy drivers
+ * assume software-based devices: e.g. they don't use proper memory barriers
+ * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more
+ * or less by chance. For now, only support legacy IO on X86.
+ */
+#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
+#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_LEGACY_ADMIN_CMD_BITMAP
+#else
+#define VIRTIO_ADMIN_CMD_BITMAP 0
+#endif
+
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+ struct virtio_admin_cmd *cmd);
+
#endif
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index ee6a386d250b..f62b530aa3b5 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -19,6 +19,8 @@
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"
+#define VIRTIO_AVQ_SGS_MAX 4
+
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -26,6 +28,187 @@ static u64 vp_get_features(struct virtio_device *vdev)
return vp_modern_get_features(&vp_dev->mdev);
}
+static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ return index == vp_dev->admin_vq.vq_index;
+}
+
+static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
+ u16 opcode,
+ struct scatterlist **sgs,
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data)
+{
+ struct virtqueue *vq;
+ int ret, len;
+
+ vq = admin_vq->info.vq;
+ if (!vq)
+ return -EIO;
+
+ if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
+ opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
+ !((1ULL << opcode) & admin_vq->supported_cmds))
+ return -EOPNOTSUPP;
+
+ ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL);
+ if (ret < 0)
+ return -EIO;
+
+ if (unlikely(!virtqueue_kick(vq)))
+ return -EIO;
+
+ while (!virtqueue_get_buf(vq, &len) &&
+ !virtqueue_is_broken(vq))
+ cpu_relax();
+
+ if (virtqueue_is_broken(vq))
+ return -EIO;
+
+ return 0;
+}
+
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+ struct virtio_admin_cmd *cmd)
+{
+ struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_admin_cmd_status *va_status;
+ unsigned int out_num = 0, in_num = 0;
+ struct virtio_admin_cmd_hdr *va_hdr;
+ u16 status;
+ int ret;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return -EOPNOTSUPP;
+
+ va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
+ if (!va_status)
+ return -ENOMEM;
+
+ va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
+ if (!va_hdr) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ va_hdr->opcode = cmd->opcode;
+ va_hdr->group_type = cmd->group_type;
+ va_hdr->group_member_id = cmd->group_member_id;
+
+ /* Add header */
+ sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
+ sgs[out_num] = &hdr;
+ out_num++;
+
+ if (cmd->data_sg) {
+ sgs[out_num] = cmd->data_sg;
+ out_num++;
+ }
+
+ /* Add return status */
+ sg_init_one(&stat, va_status, sizeof(*va_status));
+ sgs[out_num + in_num] = &stat;
+ in_num++;
+
+ if (cmd->result_sg) {
+ sgs[out_num + in_num] = cmd->result_sg;
+ in_num++;
+ }
+
+ mutex_lock(&vp_dev->admin_vq.cmd_lock);
+ ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
+ le16_to_cpu(cmd->opcode),
+ sgs, out_num, in_num, sgs);
+ mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+
+ if (ret) {
+ dev_err(&vdev->dev,
+ "Failed to execute command on admin vq: %d\n.", ret);
+ goto err_cmd_exec;
+ }
+
+ status = le16_to_cpu(va_status->status);
+ if (status != VIRTIO_ADMIN_STATUS_OK) {
+ dev_err(&vdev->dev,
+ "admin command error: status(%#x) qualifier(%#x)\n",
+ status, le16_to_cpu(va_status->status_qualifier));
+ ret = -status;
+ }
+
+err_cmd_exec:
+ kfree(va_hdr);
+err_alloc:
+ kfree(va_status);
+ return ret;
+}
+
+static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ struct scatterlist data_sg;
+ __le64 *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ sg_init_one(&result_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.result_sg = &result_sg;
+
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto end;
+
+ *data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = NULL;
+
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto end;
+
+ vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
+end:
+ kfree(data);
+}
+
+static void vp_modern_avq_activate(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ __virtqueue_unbreak(admin_vq->info.vq);
+ virtio_pci_admin_cmd_list_init(vdev);
+}
+
+static void vp_modern_avq_deactivate(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ __virtqueue_break(admin_vq->info.vq);
+}
+
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -37,6 +220,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
if (features & BIT_ULL(VIRTIO_F_RING_RESET))
__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
+
+ if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
+ __virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
}
static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
@@ -69,6 +255,9 @@ static int vp_check_common_size(struct virtio_device *vdev)
if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
return -EINVAL;
+ if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
+ return -EINVAL;
+
return 0;
}
@@ -195,6 +384,8 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
/* We should never be setting status to 0. */
BUG_ON(status == 0);
vp_modern_set_status(&vp_dev->mdev, status);
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+ vp_modern_avq_activate(vdev);
}
static void vp_reset(struct virtio_device *vdev)
@@ -211,6 +402,9 @@ static void vp_reset(struct virtio_device *vdev)
*/
while (vp_modern_get_status(mdev))
msleep(1);
+
+ vp_modern_avq_deactivate(vdev);
+
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -345,6 +539,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
bool (*notify)(struct virtqueue *vq);
struct virtqueue *vq;
+ bool is_avq;
u16 num;
int err;
@@ -353,11 +548,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
else
notify = vp_notify;
- if (index >= vp_modern_get_num_queues(mdev))
+ is_avq = vp_is_avq(&vp_dev->vdev, index);
+ if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
return ERR_PTR(-EINVAL);
+ num = is_avq ?
+ VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
/* Check if queue is either not available or already active. */
- num = vp_modern_get_queue_size(mdev, index);
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
@@ -383,6 +580,12 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
goto err;
}
+ if (is_avq) {
+ mutex_lock(&vp_dev->admin_vq.cmd_lock);
+ vp_dev->admin_vq.info.vq = vq;
+ mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+ }
+
return vq;
err:
@@ -418,6 +621,12 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ if (vp_is_avq(&vp_dev->vdev, vq->index)) {
+ mutex_lock(&vp_dev->admin_vq.cmd_lock);
+ vp_dev->admin_vq.info.vq = NULL;
+ mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+ }
+
if (vp_dev->msix_enabled)
vp_modern_queue_vector(mdev, vq->index,
VIRTIO_MSI_NO_VECTOR);
@@ -527,6 +736,45 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
return true;
}
+static int vp_modern_create_avq(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq;
+ struct virtqueue *vq;
+ u16 admin_q_num;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return 0;
+
+ admin_q_num = vp_modern_avq_num(&vp_dev->mdev);
+ if (!admin_q_num)
+ return -EINVAL;
+
+ avq = &vp_dev->admin_vq;
+ avq->vq_index = vp_modern_avq_index(&vp_dev->mdev);
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
+ avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
+ if (IS_ERR(vq)) {
+ dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld",
+ PTR_ERR(vq));
+ return PTR_ERR(vq);
+ }
+
+ vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true);
+ return 0;
+}
+
+static void vp_modern_destroy_avq(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ vp_dev->del_vq(&vp_dev->admin_vq.info);
+}
+
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get = NULL,
.set = NULL,
@@ -545,6 +793,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
+ .create_avq = vp_modern_create_avq,
+ .destroy_avq = vp_modern_destroy_avq,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -565,6 +815,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
+ .create_avq = vp_modern_create_avq,
+ .destroy_avq = vp_modern_destroy_avq,
};
/* the PCI probing function */
@@ -588,9 +840,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
+ vp_dev->is_avq = vp_is_avq;
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
+ mutex_init(&vp_dev->admin_vq.cmd_lock);
return 0;
}
@@ -598,5 +852,6 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ mutex_destroy(&vp_dev->admin_vq.cmd_lock);
vp_modern_remove(mdev);
}
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index 7de8b1ebabac..0d3dbfaf4b23 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -207,6 +207,10 @@ static inline void check_offsets(void)
offsetof(struct virtio_pci_modern_common_cfg, queue_notify_data));
BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_RESET !=
offsetof(struct virtio_pci_modern_common_cfg, queue_reset));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_ADM_Q_IDX !=
+ offsetof(struct virtio_pci_modern_common_cfg, admin_queue_index));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_ADM_Q_NUM !=
+ offsetof(struct virtio_pci_modern_common_cfg, admin_queue_num));
}
/*
@@ -296,7 +300,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4, 0,
offsetofend(struct virtio_pci_modern_common_cfg,
- queue_reset),
+ admin_queue_num),
&mdev->common_len, NULL);
if (!mdev->common)
goto err_map_common;
@@ -719,6 +723,24 @@ void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
+u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+ return vp_ioread16(&cfg->admin_queue_num);
+}
+EXPORT_SYMBOL_GPL(vp_modern_avq_num);
+
+u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+ return vp_ioread16(&cfg->admin_queue_index);
+}
+EXPORT_SYMBOL_GPL(vp_modern_avq_index);
+
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");