Diffstat (limited to 'drivers/vdpa/mlx5/core/mr.c')
-rw-r--r--  drivers/vdpa/mlx5/core/mr.c  209
1 file changed, 126 insertions, 83 deletions
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 5a1971fcd87b..2197c46e563a 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -301,10 +301,13 @@ static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
sg_free_table(&mr->sg_head);
}
-static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
+static int add_direct_chain(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr,
+ u64 start,
+ u64 size,
+ u8 perm,
struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n;
LIST_HEAD(tmp);
@@ -354,9 +357,10 @@ err_alloc:
 * indirect memory key that provides access to the entire address space given
* by iotlb.
*/
-static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr,
+ struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n;
struct vhost_iotlb_map *map;
@@ -384,7 +388,7 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb
LOG_MAX_KLM_SIZE);
mr->num_klms += nnuls;
}
- err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+ err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
if (err)
goto err_chain;
}
@@ -393,7 +397,7 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb
pperm = map->perm;
}
}
- err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+ err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
if (err)
goto err_chain;
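The hunks above thread the memory region through as an explicit mr argument instead of reaching for the formerly embedded mvdev->mr, so the same chain-building code can serve any caller-owned MR. The splitting rule itself is unchanged: create_user_mr starts a new direct chain whenever the permissions change or a hole appears between consecutive iotlb maps. A minimal userspace sketch of that rule, with illustrative types and values rather than the mlx5 ones:

#include <stdint.h>
#include <stdio.h>

struct map { uint64_t start, last; uint8_t perm; };

/* Stand-in for add_direct_chain(): just report the chain bounds. */
static void add_chain(uint64_t start, uint64_t size, uint8_t perm)
{
	printf("chain: [%#llx, %#llx) perm=%u\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size), perm);
}

int main(void)
{
	struct map maps[] = {
		{ 0x0000, 0x0fff, 1 },	/* contiguous with the next map */
		{ 0x1000, 0x1fff, 1 },
		{ 0x3000, 0x3fff, 2 },	/* hole + new perm: starts a chain */
	};
	uint64_t ps = maps[0].start, pe = maps[0].start;
	uint8_t pperm = maps[0].perm;

	for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++) {
		/* A gap or a permission change closes the open chain. */
		if (maps[i].start != pe || maps[i].perm != pperm) {
			add_chain(ps, pe - ps, pperm);
			ps = maps[i].start;
			pperm = maps[i].perm;
		}
		pe = maps[i].last + 1;
	}
	add_chain(ps, pe - ps, pperm);	/* close the final chain */
	return 0;
}

Compiled standalone, this prints two chains: [0, 0x2000) with perm 1 and [0x3000, 0x4000) with perm 2.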
@@ -450,20 +454,23 @@ static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
mlx5_vdpa_destroy_mkey(mvdev, mr->mkey);
}
-static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
+static int dup_iotlb(struct vhost_iotlb *dst, struct vhost_iotlb *src)
{
struct vhost_iotlb_map *map;
u64 start = 0, last = ULLONG_MAX;
int err;
+ if (dst == src)
+ return -EINVAL;
+
if (!src) {
- err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);
+ err = vhost_iotlb_add_range(dst, start, last, start, VHOST_ACCESS_RW);
return err;
}
for (map = vhost_iotlb_itree_first(src, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
- err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,
+ err = vhost_iotlb_add_range(dst, map->start, map->last,
map->addr, map->perm);
if (err)
return err;
@@ -471,9 +478,9 @@ static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
return 0;
}
-static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
+static void prune_iotlb(struct vhost_iotlb *iotlb)
{
- vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
+ vhost_iotlb_del_range(iotlb, 0, ULLONG_MAX);
}
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
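With the destination passed explicitly, dup_iotlb and prune_iotlb become generic helpers: they can fill or drain any vhost_iotlb rather than only mvdev->cvq.iotlb, and the new dst == src guard rejects self-duplication. A hedged sketch of the pattern this enables; clone_iotlb is a hypothetical helper, assumed to live in mr.c where the static dup_iotlb() is visible:

#include <linux/errno.h>
#include <linux/vhost_iotlb.h>

/* Hypothetical helper (not in this patch): allocate a fresh iotlb and
 * populate it from src via the now-generic dup_iotlb(). */
static int clone_iotlb(struct vhost_iotlb **out, struct vhost_iotlb *src)
{
	struct vhost_iotlb *dst;
	int err;

	dst = vhost_iotlb_alloc(0, 0);	/* 0: no entry limit, no flags */
	if (!dst)
		return -ENOMEM;

	/* A NULL src maps the whole address space RW, as in the patch. */
	err = dup_iotlb(dst, src);
	if (err) {
		vhost_iotlb_free(dst);
		return err;
	}

	*out = dst;
	return 0;
}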
@@ -489,133 +496,169 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
}
}
-static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
- return;
+ if (mr->user_mr)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
- prune_iotlb(mvdev);
+ vhost_iotlb_free(mr->iotlb);
}
-static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
-
- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+ if (!mr)
return;
- if (!mr->initialized)
- return;
+ mutex_lock(&mvdev->mr_mtx);
- if (mr->user_mr)
- destroy_user_mr(mvdev, mr);
- else
- destroy_dma_mr(mvdev, mr);
+ _mlx5_vdpa_destroy_mr(mvdev, mr);
+
+ for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) {
+ if (mvdev->mr[i] == mr)
+ mvdev->mr[i] = NULL;
+ }
- mr->initialized = false;
+ mutex_unlock(&mvdev->mr_mtx);
+
+ kfree(mr);
}
-void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *new_mr,
+ unsigned int asid)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];
- mutex_lock(&mr->mkey_mtx);
+ mutex_lock(&mvdev->mr_mtx);
- _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
- _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
+ mvdev->mr[asid] = new_mr;
+ if (old_mr) {
+ _mlx5_vdpa_destroy_mr(mvdev, old_mr);
+ kfree(old_mr);
+ }
- mutex_unlock(&mr->mkey_mtx);
-}
+ mutex_unlock(&mvdev->mr_mtx);
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
-{
- mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
- mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
}
-static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb,
- unsigned int asid)
+void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
{
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
- return 0;
+ for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
+ mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[i]);
- return dup_iotlb(mvdev, iotlb);
+ prune_iotlb(mvdev->cvq.iotlb);
}
-static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb,
- unsigned int asid)
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr,
+ struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err;
- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
- return 0;
-
- if (mr->initialized)
- return 0;
-
if (iotlb)
- err = create_user_mr(mvdev, iotlb);
+ err = create_user_mr(mvdev, mr, iotlb);
else
err = create_dma_mr(mvdev, mr);
if (err)
return err;
- mr->initialized = true;
+ mr->iotlb = vhost_iotlb_alloc(0, 0);
+ if (!mr->iotlb) {
+ err = -ENOMEM;
+ goto err_mr;
+ }
+
+ err = dup_iotlb(mr->iotlb, iotlb);
+ if (err)
+ goto err_iotlb;
return 0;
+
+err_iotlb:
+ vhost_iotlb_free(mr->iotlb);
+
+err_mr:
+ if (iotlb)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
+ return err;
}
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb, unsigned int asid)
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb)
{
+ struct mlx5_vdpa_mr *mr;
int err;
- err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
- if (err)
- return err;
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&mvdev->mr_mtx);
+ err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
+ mutex_unlock(&mvdev->mr_mtx);
- err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
if (err)
goto out_err;
- return 0;
+ return mr;
out_err:
- _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
-
- return err;
+ kfree(mr);
+ return ERR_PTR(err);
}
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
- unsigned int asid)
+int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb,
+ unsigned int asid)
{
int err;
- mutex_lock(&mvdev->mr.mkey_mtx);
- err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
- mutex_unlock(&mvdev->mr.mkey_mtx);
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+ return 0;
+
+ spin_lock(&mvdev->cvq.iommu_lock);
+
+ prune_iotlb(mvdev->cvq.iotlb);
+ err = dup_iotlb(mvdev->cvq.iotlb, iotlb);
+
+ spin_unlock(&mvdev->cvq.iommu_lock);
+
return err;
}
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
- bool *change_map, unsigned int asid)
+int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
- int err = 0;
+ struct mlx5_vdpa_mr *mr;
+
+ mr = mlx5_vdpa_create_mr(mvdev, NULL);
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
- *change_map = false;
- mutex_lock(&mr->mkey_mtx);
- if (mr->initialized) {
- mlx5_vdpa_info(mvdev, "memory map update\n");
- *change_map = true;
+ mlx5_vdpa_update_mr(mvdev, mr, 0);
+
+ return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
+}
+
+int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+{
+ if (asid >= MLX5_VDPA_NUM_AS)
+ return -EINVAL;
+
+ mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]);
+
+ if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ if (mlx5_vdpa_create_dma_mr(mvdev))
+ mlx5_vdpa_warn(mvdev, "create DMA MR failed\n");
+ } else {
+ mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, asid);
}
- if (!*change_map)
- err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
- mutex_unlock(&mr->mkey_mtx);
- return err;
+ return 0;
}
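Taken together, the new interface replaces the implicit single-MR state machine (mr->initialized, change_map) with caller-owned MR objects: mlx5_vdpa_create_mr() builds one from an iotlb and returns an ERR_PTR on failure, mlx5_vdpa_update_mr() installs it for an ASID and disposes of the previous one under mr_mtx, and mlx5_vdpa_destroy_mr() unlinks and frees it. A hedged sketch of how a set_map-style caller might drive this API; error handling is abbreviated and the driver's actual handler may differ:

static int set_map_sketch(struct mlx5_vdpa_dev *mvdev,
			  struct vhost_iotlb *iotlb, unsigned int asid)
{
	struct mlx5_vdpa_mr *new_mr;

	if (asid >= MLX5_VDPA_NUM_AS)
		return -EINVAL;

	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
		new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
		if (IS_ERR(new_mr))
			return PTR_ERR(new_mr);

		/* Installs mr[asid] and frees the old MR under mr_mtx. */
		mlx5_vdpa_update_mr(mvdev, new_mr, asid);
	}

	/* No-op unless asid backs the control VQ group. */
	return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
}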