author		Oded Gabbay <ogabbay@kernel.org>	2022-09-05 17:14:45 +0300
committer	Oded Gabbay <ogabbay@kernel.org>	2022-09-19 15:08:39 +0300
commit		82736b063fde67ea2a9b16ef5acf3d5db03e2deb (patch)
tree		d0ddfb95f9eeef1e6e83da5a8e85fefc960d7123 /drivers/misc/habanalabs/common/memory.c
parent		6f0818c9fc9b81d8a303a8d3fb1826d71777f7ed (diff)
habanalabs: MMU invalidation h/w is per device
The code used the mmu mutex to protect access to the context's page tables and invalidation of the MMU cache. Because the page tables (pgt) are per context, the mmu mutex was a member of the context object.

The problem is that the device has a single MMU invalidation h/w (per MMU). Therefore, the mmu mutex should not be a property of the context but a property of the device.

Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
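A minimal before/after sketch of the data-structure change (simplified; these are not the full kernel struct definitions, only the field relevant here — hl_ctx, hl_device and mmu_lock are the driver's names, everything else is elided):

#include <linux/mutex.h>
#include <linux/types.h>

/* Before this patch: the lock lived in the per-context object.
 * Each context serialized its own page tables, but two contexts
 * could still drive the shared invalidation h/w concurrently. */
struct hl_ctx {
	struct mutex mmu_lock;	/* per-context: protects pgt + cache invalidation */
	u32 asid;
	/* ... other members elided ... */
};

/* After this patch: the lock lives in the device object, so all
 * contexts serialize on the single per-device invalidation h/w. */
struct hl_device {
	struct mutex mmu_lock;	/* per-device: one invalidation h/w per MMU */
	/* ... other members elided ... */
};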
Diffstat (limited to 'drivers/misc/habanalabs/common/memory.c')
-rw-r--r--	drivers/misc/habanalabs/common/memory.c	| 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 0a653fff08d4..096fa3c1ae95 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -1210,18 +1210,18 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
 		goto va_block_err;
 	}
 
-	mutex_lock(&ctx->mmu_lock);
+	mutex_lock(&hdev->mmu_lock);
 
 	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
 	if (rc) {
 		dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
-		mutex_unlock(&ctx->mmu_lock);
+		mutex_unlock(&hdev->mmu_lock);
 		goto map_err;
 	}
 
 	rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
 				ctx->asid, ret_vaddr, phys_pg_pack->total_size);
-	mutex_unlock(&ctx->mmu_lock);
+	mutex_unlock(&hdev->mmu_lock);
 
 	if (rc)
 		goto map_err;
@@ -1362,7 +1362,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 	else
 		vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
 
-	mutex_lock(&ctx->mmu_lock);
+	mutex_lock(&hdev->mmu_lock);
 
 	unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
@@ -1375,7 +1375,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 	rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
 				phys_pg_pack->total_size);
 
-	mutex_unlock(&ctx->mmu_lock);
+	mutex_unlock(&hdev->mmu_lock);
 
 	/*
 	 * If the context is closing we don't need to check for the MMU cache
@@ -2771,13 +2771,13 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 		unmap_device_va(ctx, &args, true);
 	}
 
-	mutex_lock(&ctx->mmu_lock);
+	mutex_lock(&hdev->mmu_lock);
 
 	/* invalidate the cache once after the unmapping loop */
 	hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
 	hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
 
-	mutex_unlock(&ctx->mmu_lock);
+	mutex_unlock(&hdev->mmu_lock);
 
 	INIT_LIST_HEAD(&free_list);
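Since the mutex is now a device property, its init/teardown presumably moves from context creation to device setup as well. A hedged sketch of that counterpart (those hunks are outside memory.c, so the exact call sites are an assumption here):

/* Assumed device-setup counterpart, not shown in this file's hunks:
 * the lock is created once per device rather than once per context. */
mutex_init(&hdev->mmu_lock);	/* at device initialization */
/* ... device lifetime: map/unmap/invalidate under hdev->mmu_lock ... */
mutex_destroy(&hdev->mmu_lock);	/* at device teardown */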