From 03facb39d6c6433a78d0f79c7a146b1e6a61943e Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Wed, 31 Jan 2024 07:08:54 -0800
Subject: drm/msm/gem: Fix double resv lock acquire
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since commit 79e2cf2e7a19 ("drm/gem: Take reservation lock for vmap/vunmap
operations"), the resv lock is already held in the prime vmap path, so don't
try to grab it again.

v2: This applies to the vunmap path as well
v3: Fix the Fixes: commit reference

Fixes: 79e2cf2e7a19 ("drm/gem: Take reservation lock for vmap/vunmap operations")
Signed-off-by: Rob Clark
Acked-by: Christian König
Patchwork: https://patchwork.freedesktop.org/patch/576642/
---
 drivers/gpu/drm/msm/msm_gem_prime.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 5f68e31a3e4e..0915f3b68752 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -26,7 +26,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
 {
 	void *vaddr;
 
-	vaddr = msm_gem_get_vaddr(obj);
+	vaddr = msm_gem_get_vaddr_locked(obj);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);
 	iosys_map_set_vaddr(map, vaddr);
@@ -36,7 +36,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
 
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
 {
-	msm_gem_put_vaddr(obj);
+	msm_gem_put_vaddr_locked(obj);
 }
 
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
--
cgit v1.2.3

From 6a0dbcd20ef252ebf98af94186a2e53da7167bed Mon Sep 17 00:00:00 2001
From: Dmitry Baryshkov
Date: Tue, 9 Jan 2024 22:41:08 +0200
Subject: drm/msm/a6xx: set highest_bank_bit to 13 for a610

During testing of GNOME on the Qualcomm Robotics platform, screen corruption
was observed. Lowering the GPU's highest_bank_bit from 14 to 13 seems to fix
the corruption.

Note that the MDSS and DPU drivers use HBB=1 (which maps to
highest_bank_bit = 14), so this change merely works around the UBWC swizzling
issue on this platform until the real cause is found.

Fixes: e7fc9398e608 ("drm/msm/a6xx: Add A610 support")
Signed-off-by: Dmitry Baryshkov
Patchwork: https://patchwork.freedesktop.org/patch/573838/
Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c0bc924cd302..c9c55e2ea584 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1287,7 +1287,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
 	gpu->ubwc_config.highest_bank_bit = 15;
 
 	if (adreno_is_a610(gpu)) {
-		gpu->ubwc_config.highest_bank_bit = 14;
+		gpu->ubwc_config.highest_bank_bit = 13;
 		gpu->ubwc_config.min_acc_len = 1;
 		gpu->ubwc_config.ubwc_mode = 1;
 	}
--
cgit v1.2.3

From 917e9b7c2350e3e53162fcf5035e5f2d68e2cbed Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Tue, 9 Jan 2024 10:22:17 -0800
Subject: Revert "drm/msm/gpu: Push gpu lock down past runpm"

This reverts commit abe2023b4cea192ab266b351fd38dc9dbd846df0.

Changing the locking order means that scheduler/msm_job_run() can race with
the recovery kthread worker, with the result that the GPU gets an extra runpm
get when we are trying to power it off, leaving the GPU in an unrecovered
state. I'll need to come up with a different scheme for appeasing lockdep.
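For context, a simplified sketch of the submit-path ordering this revert
restores. This is illustrative only, not the literal driver code: the real
msm_job_run() takes a struct drm_sched_job and derives gpu and submit from it,
but the locking shape matches the diff that follows.

    static struct dma_fence *example_job_run(struct msm_gpu *gpu,
                                             struct msm_gem_submit *submit)
    {
            /*
             * Take gpu->lock before any runpm reference is taken.  The
             * recovery worker also holds gpu->lock, so the submit path can
             * no longer add a stray runpm get while the GPU is being
             * powered off.
             */
            mutex_lock(&gpu->lock);
            msm_gpu_submit(gpu, submit);  /* pm_runtime_get_sync() happens under the lock */
            mutex_unlock(&gpu->lock);

            return dma_fence_get(submit->hw_fence);
    }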
Signed-off-by: Rob Clark
Patchwork: https://patchwork.freedesktop.org/patch/573835/
---
 drivers/gpu/drm/msm/msm_gpu.c        | 11 +++++------
 drivers/gpu/drm/msm/msm_ringbuffer.c |  7 +++++--
 2 files changed, 10 insertions(+), 8 deletions(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 095390774f22..655002b21b0d 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned long flags;
 
-	pm_runtime_get_sync(&gpu->pdev->dev);
+	WARN_ON(!mutex_is_locked(&gpu->lock));
 
-	mutex_lock(&gpu->lock);
+	pm_runtime_get_sync(&gpu->pdev->dev);
 
 	msm_gpu_hw_init(gpu);
 
+	submit->seqno = submit->hw_fence->seqno;
+
 	update_sw_cntrs(gpu);
 
 	/*
@@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	gpu->funcs->submit(gpu, submit);
 
 	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
 
-	hangcheck_timer_reset(gpu);
-
-	mutex_unlock(&gpu->lock);
-	pm_runtime_put(&gpu->pdev->dev);
+	hangcheck_timer_reset(gpu);
 }
 
 /*
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 4bc13f7d005a..9d6655f96f0c 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
 	msm_fence_init(submit->hw_fence, fctx);
 
-	submit->seqno = submit->hw_fence->seqno;
-
 	mutex_lock(&priv->lru.lock);
 
 	for (i = 0; i < submit->nr_bos; i++) {
@@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
 	mutex_unlock(&priv->lru.lock);
 
+	/* TODO move submit path over to using a per-ring lock.. */
+	mutex_lock(&gpu->lock);
+
 	msm_gpu_submit(gpu, submit);
 
+	mutex_unlock(&gpu->lock);
+
 	return dma_fence_get(submit->hw_fence);
 }
 
--
cgit v1.2.3

From 8c7bfd8262319fd3f127a5380f593ea76f1b88a2 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Tue, 13 Feb 2024 09:23:40 -0800
Subject: drm/msm: Wire up tlb ops

The brute-force iommu_flush_iotlb_all() was good enough for unmap, but in some
cases a map operation can require removing a table PTE entry and replacing it
with a block entry, which also requires TLB invalidation. Missing this was
resulting in an obscure iova fault on what should be a valid buffer address.

Thanks to Robin Murphy for helping me understand the cause of the fault.
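As a reading aid for the diff below, here is a condensed sketch of the
per-pagetable flush hook it adds (simplified from the patch; the actual
function is msm_iommu_tlb_flush_all(), and the tlb_flush_walk variant follows
the same pattern):

    static void sketch_tlb_flush_all(void *cookie)
    {
            struct msm_iommu_pagetable *pagetable = cookie;
            struct adreno_smmu_priv *adreno_smmu;

            /* Only touch the SMMU if it is currently powered up */
            if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
                    return;

            /* Forward the invalidation to the parent context bank's flush ops */
            adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
            pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);

            pm_runtime_put_autosuspend(pagetable->iommu_dev);
    }

The pm_runtime_get_if_in_use() guard means the invalidation is only forwarded
while the SMMU is powered; the flush ops themselves are the ones saved from
the parent TTBR1 pagetable config, as the diff below shows.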
Cc: Robin Murphy
Cc: stable@vger.kernel.org
Fixes: b145c6e65eb0 ("drm/msm: Add support to create a local pagetable")
Signed-off-by: Rob Clark
Patchwork: https://patchwork.freedesktop.org/patch/578117/
---
 drivers/gpu/drm/msm/msm_iommu.c | 32 +++++++++++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 5cc8d358cc97..d5512037c38b 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
 	struct msm_mmu base;
 	struct msm_mmu *parent;
 	struct io_pgtable_ops *pgtbl_ops;
+	const struct iommu_flush_ops *tlb;
+	struct device *iommu_dev;
 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
 	phys_addr_t ttbr;
 	u32 asid;
@@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
 
 static void msm_iommu_tlb_flush_all(void *cookie)
 {
+	struct msm_iommu_pagetable *pagetable = cookie;
+	struct adreno_smmu_priv *adreno_smmu;
+
+	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+		return;
+
+	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+	pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
+
+	pm_runtime_put_autosuspend(pagetable->iommu_dev);
 }
 
 static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
 		size_t granule, void *cookie)
 {
+	struct msm_iommu_pagetable *pagetable = cookie;
+	struct adreno_smmu_priv *adreno_smmu;
+
+	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+		return;
+
+	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+	pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
+
+	pm_runtime_put_autosuspend(pagetable->iommu_dev);
 }
 
 static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
@@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 {
 }
 
-static const struct iommu_flush_ops null_tlb_ops = {
 	.tlb_flush_all = msm_iommu_tlb_flush_all,
+static const struct iommu_flush_ops tlb_ops = {
 	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
 	.tlb_add_page = msm_iommu_tlb_add_page,
@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 
 	/* The incoming cfg will have the TTBR1 quirk enabled */
 	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
-	ttbr0_cfg.tlb = &null_tlb_ops;
+	ttbr0_cfg.tlb = &tlb_ops;
 
 	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
-		&ttbr0_cfg, iommu->domain);
+		&ttbr0_cfg, pagetable);
 	if (!pagetable->pgtbl_ops) {
 		kfree(pagetable);
 		return ERR_PTR(-ENOMEM);
@@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 
 	/* Needed later for TLB flush */
 	pagetable->parent = parent;
+	pagetable->tlb = ttbr1_cfg->tlb;
+	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
 	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
 	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
 
--
cgit v1.2.3
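A closing note on the change above: the cookie passed to alloc_io_pgtable_ops()
is now the msm_iommu_pagetable rather than the parent iommu domain, and the
io-pgtable core hands that cookie back to the registered iommu_flush_ops
whenever a map or unmap needs TLB maintenance. A minimal sketch of that
relationship (illustrative only; sketch_pagetable_setup is a made-up name):

    static struct io_pgtable_ops *sketch_pagetable_setup(struct msm_iommu_pagetable *pagetable,
                                                         struct io_pgtable_cfg *ttbr0_cfg)
    {
            /* Register the flush callbacks added by the patch above ... */
            ttbr0_cfg->tlb = &tlb_ops;

            /*
             * ... and pass the pagetable as the io-pgtable cookie, so the core
             * calls back into msm_iommu_tlb_flush_all()/_walk() with it when
             * invalidation is required.
             */
            return alloc_io_pgtable_ops(ARM_64_LPAE_S1, ttbr0_cfg, pagetable);
    }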