author    Moshe Shemesh <moshe@nvidia.com>                  2023-10-25 20:49:59 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2024-01-10 19:16:55 +0300
commit    04ebb29dc9aabd4c71f5f796e9fa7fee7c477456 (patch)
tree      217929f19255a2dc16255b618a0ef380f594d0b6 /drivers/infiniband/hw/mlx5
parent    eaab31dceb114bc69a38e0ad00fab88138b76b6e (diff)
RDMA/mlx5: Fix mkey cache WQ flush
[ Upstream commit a53e215f90079f617360439b1b6284820731e34c ]

The cited patch tries to ensure there are no pending works on the mkey
cache workqueue by disabling the queueing of new works and calling
flush_workqueue(). But this workqueue also has delayed works, which may
still be waiting out their delay time before being queued.

Add cancel_delayed_work() for the delayed works that are still waiting
to be queued; the subsequent flush_workqueue() then flushes all works
that are already queued or running.

Fixes: 374012b00457 ("RDMA/mlx5: Fix mkey cache possible deadlock on cleanup")
Link: https://lore.kernel.org/r/b8722f14e7ed81452f791764a26d2ed4cfa11478.1698256179.git.leon@kernel.org
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
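The fix rests on a general workqueue rule: flush_workqueue() waits only
for works already queued or running, so a delayed work whose timer has
not yet fired is invisible to the flush and must be cancelled explicitly.
The minimal module sketch below illustrates that cleanup ordering; it is
not the mlx5 code, and demo_wq, demo_dwork and demo_fn are hypothetical
names used only for illustration:

// Hypothetical module sketching the cancel-then-flush cleanup ordering.
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_dwork;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo: delayed work ran\n");
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo_dwork, demo_fn);
	/* Timer-armed but not yet queued for the next 5 seconds. */
	queue_delayed_work(demo_wq, &demo_dwork, msecs_to_jiffies(5000));
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * flush_workqueue() would not wait for demo_dwork while its
	 * timer is still pending, so cancel it first, then flush
	 * whatever is already queued or running.
	 */
	cancel_delayed_work(&demo_dwork);
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that plain cancel_delayed_work() suffices here rather than
cancel_delayed_work_sync(): a delayed work whose timer has already fired
is on the queue by then, and the following flush_workqueue() waits for it.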
Diffstat (limited to 'drivers/infiniband/hw/mlx5')
 drivers/infiniband/hw/mlx5/mr.c | 2 ++
 1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8a3762d9ff58..e0629898c3c0 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1026,11 +1026,13 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
                 return;
 
         mutex_lock(&dev->cache.rb_lock);
+        cancel_delayed_work(&dev->cache.remove_ent_dwork);
         for (node = rb_first(root); node; node = rb_next(node)) {
                 ent = rb_entry(node, struct mlx5_cache_ent, node);
                 xa_lock_irq(&ent->mkeys);
                 ent->disabled = true;
                 xa_unlock_irq(&ent->mkeys);
+                cancel_delayed_work(&ent->dwork);
         }
         mutex_unlock(&dev->cache.rb_lock);
 