Diffstat (limited to 'drivers/vdpa/mlx5')
 drivers/vdpa/mlx5/core/mlx5_vdpa.h |  2
 drivers/vdpa/mlx5/core/mr.c        | 95
 drivers/vdpa/mlx5/core/resources.c |  7
 drivers/vdpa/mlx5/net/mlx5_vnet.c  | 67
 4 files changed, 132 insertions(+), 39 deletions(-)
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index b6cc53ba980c..0002b2136b48 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -35,12 +35,14 @@ struct mlx5_vdpa_mr {
/* serialize mkey creation and destruction */
struct mutex mkey_mtx;
+ bool user_mr;
};
struct mlx5_vdpa_resources {
u32 pdn;
struct mlx5_uars_page *uar;
void __iomem *kick_addr;
+ u64 phys_kick_addr;
u16 uid;
u32 null_mkey;
bool valid;
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 800cfd1967ad..dcee6039e966 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -219,11 +219,6 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}
-static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
-{
- return &mvdev->mdev->pdev->dev;
-}
-
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
struct vhost_iotlb *iotlb)
{
@@ -239,7 +234,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
u64 pa;
u64 paend;
struct scatterlist *sg;
- struct device *dma = get_dma_device(mvdev);
+ struct device *dma = mvdev->vdev.dma_dev;
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -298,7 +293,7 @@ err_map:
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
- struct device *dma = get_dma_device(mvdev);
+ struct device *dma = mvdev->vdev.dma_dev;
destroy_direct_mr(mvdev, mr);
dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
@@ -360,7 +355,7 @@ err_alloc:
* indirect memory key that provides access to the entire address space given
* by iotlb.
*/
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
@@ -374,9 +369,6 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
int err = 0;
int nnuls;
- if (mr->initialized)
- return 0;
-
INIT_LIST_HEAD(&mr->head);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
@@ -414,7 +406,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
if (err)
goto err_chain;
- mr->initialized = true;
+ mr->user_mr = true;
return 0;
err_chain:
@@ -426,33 +418,94 @@ err_chain:
return err;
}
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
+ if (!err)
+ mr->user_mr = false;
+
+ kfree(in);
+ return err;
+}
+
+static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
+}
+
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err;
- mutex_lock(&mr->mkey_mtx);
+ if (mr->initialized)
+ return 0;
+
+ if (iotlb)
+ err = create_user_mr(mvdev, iotlb);
+ else
+ err = create_dma_mr(mvdev, mr);
+
+ if (!err)
+ mr->initialized = true;
+
+ return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ int err;
+
+ mutex_lock(&mvdev->mr.mkey_mtx);
err = _mlx5_vdpa_create_mr(mvdev, iotlb);
- mutex_unlock(&mr->mkey_mtx);
+ mutex_unlock(&mvdev->mr.mkey_mtx);
return err;
}
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n;
- mutex_lock(&mr->mkey_mtx);
- if (!mr->initialized)
- goto out;
-
destroy_indirect_key(mvdev, mr);
list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
list_del_init(&dmr->list);
unmap_direct_mr(mvdev, dmr);
kfree(dmr);
}
+}
+
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+ mutex_lock(&mr->mkey_mtx);
+ if (!mr->initialized)
+ goto out;
+
+ if (mr->user_mr)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
memset(mr, 0, sizeof(*mr));
mr->initialized = false;
out:
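
The mr.c changes split MR creation into two paths: the iotlb-backed user MR (the old behavior, now in create_user_mr()) and a device-wide DMA mkey for the no-translation case, with user_mr recording which teardown applies. A minimal standalone C sketch of the dispatch, using hypothetical simplified types (mr_state, the *_sketch helpers) in place of the mlx5 structures:

#include <stdbool.h>
#include <stddef.h>

struct iotlb; /* stand-in for struct vhost_iotlb */

struct mr_state {
	bool initialized;
	bool user_mr; /* true: iotlb-backed user MR; false: DMA mkey */
};

static int create_user_mr_sketch(struct mr_state *mr, struct iotlb *iotlb)
{
	(void)iotlb; /* would walk the iotlb and chain direct MRs */
	mr->user_mr = true;
	return 0;
}

static int create_dma_mr_sketch(struct mr_state *mr)
{
	mr->user_mr = false; /* would create a length64 PA mkey */
	return 0;
}

/* Mirrors _mlx5_vdpa_create_mr(): a NULL iotlb selects the DMA path,
 * and user_mr later tells mlx5_vdpa_destroy_mr() which teardown to run. */
static int create_mr_sketch(struct mr_state *mr, struct iotlb *iotlb)
{
	int err;

	if (mr->initialized)
		return 0;

	err = iotlb ? create_user_mr_sketch(mr, iotlb)
		    : create_dma_mr_sketch(mr);
	if (!err)
		mr->initialized = true;
	return err;
}

int main(void)
{
	struct mr_state mr = { 0 };

	create_mr_sketch(&mr, NULL); /* no iotlb: DMA mkey path */
	return mr.user_mr;           /* exits 0: DMA path was taken */
}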
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 6521cbd0f5c2..d4606213f88a 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -54,6 +54,9 @@ static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
void *in;
int err;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
+ return 0;
+
/* 0 means not supported */
if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
return -EOPNOTSUPP;
@@ -79,6 +82,9 @@ static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+ if (!uid)
+ return;
+
MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
MLX5_SET(destroy_uctx_in, in, uid, uid);
@@ -247,6 +253,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
goto err_key;
kick_addr = mdev->bar_addr + offset;
+ res->phys_kick_addr = kick_addr;
res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
if (!res->kick_addr) {
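
With the umem_uid_0 capability the firmware accepts resources created with uid == 0, so no user context is needed: create_uctx() returns early and leaves the uid at zero, and destroy_uctx() treats uid 0 as "nothing to free". A hedged standalone sketch of that guard logic (caps and the *_sketch helpers are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative capability bits standing in for MLX5_CAP_GEN() reads. */
struct caps {
	bool umem_uid_0;      /* device accepts uid == 0 for resources */
	uint8_t log_max_uctx; /* 0 means user contexts are unsupported */
};

static int create_uctx_sketch(const struct caps *c, uint16_t *uid)
{
	if (c->umem_uid_0)
		return 0;   /* *uid stays 0; resources will use uid 0 */
	if (!c->log_max_uctx)
		return -95; /* EOPNOTSUPP */
	*uid = 1;           /* would issue MLX5_CMD_OP_CREATE_UCTX */
	return 0;
}

static void destroy_uctx_sketch(uint16_t uid)
{
	if (!uid)
		return; /* uid 0: no uctx was ever created */
	/* would issue MLX5_CMD_OP_DESTROY_UCTX for uid */
}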
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index dda5dc6f7737..2a31467f7ac5 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -611,8 +611,8 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
-static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
- struct mlx5_vdpa_umem **umemp)
+static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+ struct mlx5_vdpa_umem **umemp)
{
struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
int p_a;
@@ -635,7 +635,7 @@ static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
*umemp = &mvq->umem3;
break;
}
- return p_a * mvq->num_ent + p_b;
+ (*umemp)->size = p_a * mvq->num_ent + p_b;
}
static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
@@ -651,15 +651,10 @@ static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
void *in;
int err;
__be64 *pas;
- int size;
struct mlx5_vdpa_umem *umem;
- size = umem_size(ndev, mvq, num, &umem);
- if (size < 0)
- return size;
-
- umem->size = size;
- err = umem_frag_buf_alloc(ndev, umem, size);
+ set_umem_size(ndev, mvq, num, &umem);
+ err = umem_frag_buf_alloc(ndev, umem, umem->size);
if (err)
return err;
@@ -829,9 +824,9 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
- MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem1.size);
+ MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
- MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem1.size);
+ MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
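
Two fixes meet in these hunks: umem sizing becomes infallible (set_umem_size() stores the result instead of returning it), and the vq context now programs umem_2_size and umem_3_size from their own umems rather than from mvq->umem1.size, a copy-paste slip. Each size is p_a * num_ent + p_b, where the a/b pair comes from per-umem device capabilities; a simplified sketch (struct names are illustrative):

#include <stdint.h>

struct umem_caps { uint32_t a, b; }; /* per-umem a/b from device caps */
struct umem { uint64_t size; };

/* Mirrors set_umem_size(): size = a * num_ent + b for umem 1, 2 or 3. */
static void set_umem_size_sketch(const struct umem_caps caps[3], int num,
				 uint16_t num_ent, struct umem *u)
{
	u->size = (uint64_t)caps[num - 1].a * num_ent + caps[num - 1].b;
}

With the fix in place, each of the three MLX5_SET(virtio_q, vq_ctx, umem_N_size, ...) calls reads the size of its matching umem.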
@@ -1428,8 +1423,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
return -EINVAL;
}
- mvq->used_idx = state->avail_index;
- mvq->avail_idx = state->avail_index;
+ mvq->used_idx = state->split.avail_index;
+ mvq->avail_idx = state->split.avail_index;
return 0;
}
@@ -1450,7 +1445,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
* Since both values should be identical, we take the value of
* used_idx which is reported correctly.
*/
- state->avail_index = mvq->used_idx;
+ state->split.avail_index = mvq->used_idx;
return 0;
}
@@ -1459,7 +1454,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
return err;
}
- state->avail_index = attr.used_index;
+ state->split.avail_index = attr.used_index;
return 0;
}
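
The state->avail_index accesses gain a .split level because the vdpa core's vdpa_vq_state became a union over ring layouts; mlx5 handles split rings here. A simplified model of the shape this code now compiles against (trimmed from include/linux/vdpa.h; field widths follow the split/packed virtio ring formats):

#include <stdint.h>

struct vdpa_vq_state_split {
	uint16_t avail_index; /* one index: device and driver agree */
};

struct vdpa_vq_state_packed {
	uint16_t last_avail_counter : 1;
	uint16_t last_avail_idx : 15;
	uint16_t last_used_counter : 1;
	uint16_t last_used_idx : 15;
};

struct vdpa_vq_state {
	union {
		struct vdpa_vq_state_split split;   /* used here */
		struct vdpa_vq_state_packed packed;
	};
};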
@@ -1772,6 +1767,14 @@ out:
mutex_unlock(&ndev->reslock);
}
+static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = 0; i < ndev->mvdev.max_vqs; i++)
+ ndev->vqs[i].ready = false;
+}
+
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -1782,10 +1785,15 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
+ clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
++mvdev->generation;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ if (mlx5_vdpa_create_mr(mvdev, NULL))
+ mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ }
return;
}
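
Device reset now has three visible effects in order: every virtqueue drops back to !ready (clear_vqs_ready()), the current MR is destroyed, and, when umem_uid_0 is supported, the 1:1 DMA MR is recreated immediately so the device can DMA again before any set_map() call arrives. A compact sketch of that ordering with hypothetical types:

#include <stdbool.h>

#define MAX_VQS 16

struct vq { bool ready; };

struct dev_sketch {
	struct vq vqs[MAX_VQS];
	int max_vqs;
	unsigned int generation;
	bool umem_uid_0;
};

/* Mirrors the status == 0 branch of mlx5_vdpa_set_status(). */
static void device_reset_sketch(struct dev_sketch *d)
{
	/* teardown_driver() would run first */
	for (int i = 0; i < d->max_vqs; i++)
		d->vqs[i].ready = false; /* clear_vqs_ready() */
	/* mlx5_vdpa_destroy_mr() and status/feature clearing here */
	d->generation++;
	if (d->umem_uid_0) {
		/* recreate the DMA MR: mlx5_vdpa_create_mr(mvdev, NULL) */
	}
}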
@@ -1866,6 +1874,7 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
ndev = to_mlx5_vdpa_ndev(mvdev);
free_resources(ndev);
+ mlx5_vdpa_destroy_mr(mvdev);
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -1876,8 +1885,22 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct vdpa_notification_area ret = {};
+ struct mlx5_vdpa_net *ndev;
+ phys_addr_t addr;
+
+ /* If the SF BAR size is smaller than PAGE_SIZE, do not use direct
+ * notification to avoid the risk of mapping pages that contain the BAR
+ * of more than one SF
+ */
+ if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT)
+ return ret;
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr;
+ ret.addr = addr;
+ ret.size = PAGE_SIZE;
return ret;
}
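
Direct doorbell notification hands userspace a whole page mapping of res->phys_kick_addr, which is why resources.c now records the physical address alongside the ioremap'ed one. The guard compares the minimum sub-function BAR size against PAGE_SHIFT: if an SF BAR can be smaller than one host page, a page-granular mapping could expose a neighboring SF's registers, so returning an empty area forces the kick-through-driver fallback. The check, as a tiny standalone predicate:

#include <stdbool.h>
#include <stdint.h>

/* Safe only when the smallest SF BAR spans at least one host page;
 * log_min_sf_size counts 4 KiB blocks, so "+ 12" converts it to a
 * log2 byte size comparable with page_shift. */
static bool direct_kick_safe(uint8_t log_min_sf_size, int page_shift)
{
	return log_min_sf_size + 12 >= page_shift;
}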
@@ -2037,14 +2060,20 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
goto err_mtu;
}
- mvdev->vdev.dma_dev = mdev->device;
+ mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
goto err_mpfs;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ err = mlx5_vdpa_create_mr(mvdev, NULL);
+ if (err)
+ goto err_res;
+ }
+
err = alloc_resources(ndev);
if (err)
- goto err_res;
+ goto err_mr;
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
@@ -2056,6 +2085,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
err_reg:
free_resources(ndev);
+err_mr:
+ mlx5_vdpa_destroy_mr(mvdev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
err_mpfs:
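
The new err_mr label keeps the setup/unwind symmetry intact: teardown runs in exact reverse order of setup, and a failure after the conditional mlx5_vdpa_create_mr() must free the MR but not resources that were never allocated. The pattern, reduced to a runnable toy (step names are illustrative):

#include <stdio.h>

static int setup_a(void) { return 0; }   /* alloc_resources analogue  */
static int setup_b(void) { return 0; }   /* create_mr analogue        */
static int setup_c(void) { return -1; }  /* register analogue (fails) */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

int main(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;
	err = setup_b();
	if (err)
		goto err_a;
	err = setup_c();
	if (err)
		goto err_b;  /* fails: unwinds b, then a */
	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
	return err;
}

Note that mlx5_vdpa_destroy_mr() checks mr->initialized before doing anything, so landing on err_mr is safe even when the umem_uid_0 branch was skipped and no MR was created.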