Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_bo.c | 219
1 file changed, 140 insertions, 79 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c2ec91cc845d..19cab37ac69c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -199,12 +199,12 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
 
 struct nouveau_bo *
 nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
-		 u32 tile_mode, u32 tile_flags)
+		 u32 tile_mode, u32 tile_flags, bool internal)
 {
 	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
 	struct nvif_mmu *mmu = &cli->mmu;
-	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
+	struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
 	int i, pi = -1;
 
 	if (!*size) {
@@ -215,6 +215,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
 		return ERR_PTR(-ENOMEM);
+
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
@@ -232,68 +233,103 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		nvbo->force_coherent = true;
 	}
 
-	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
-		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
-		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-			kfree(nvbo);
-			return ERR_PTR(-EINVAL);
+	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+	if (!nouveau_cli_uvmm(cli) || internal) {
+		/* for BO noVM allocs, don't assign kinds */
+		if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+			nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+				kfree(nvbo);
+				return ERR_PTR(-EINVAL);
+			}
+
+			nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+		} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+			nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+			nvbo->comp = (tile_flags & 0x00030000) >> 16;
+			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+				kfree(nvbo);
+				return ERR_PTR(-EINVAL);
+			}
+		} else {
+			nvbo->zeta = (tile_flags & 0x00000007);
 		}
+		nvbo->mode = tile_mode;
+
+		/* Determine the desirable target GPU page size for the buffer. */
+		for (i = 0; i < vmm->page_nr; i++) {
+			/* Because we cannot currently allow VMM maps to fail
+			 * during buffer migration, we need to determine page
+			 * size for the buffer up-front, and pre-allocate its
+			 * page tables.
+			 *
+			 * Skip page sizes that can't support needed domains.
+			 */
+			if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+			    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+				continue;
+			if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+			    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+				continue;
 
-		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
-	} else
-	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
-		nvbo->comp = (tile_flags & 0x00030000) >> 16;
-		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			/* Select this page size if it's the first that supports
+			 * the potential memory domains, or when it's compatible
+			 * with the requested compression settings.
+			 */
+			if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+				pi = i;
+
+			/* Stop once the buffer is larger than the current page size. */
+			if (*size >= 1ULL << vmm->page[i].shift)
+				break;
+		}
+
+		if (WARN_ON(pi < 0)) {
 			kfree(nvbo);
 			return ERR_PTR(-EINVAL);
 		}
-	} else {
-		nvbo->zeta = (tile_flags & 0x00000007);
-	}
-	nvbo->mode = tile_mode;
-	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
-
-	/* Determine the desirable target GPU page size for the buffer. */
-	for (i = 0; i < vmm->page_nr; i++) {
-		/* Because we cannot currently allow VMM maps to fail
-		 * during buffer migration, we need to determine page
-		 * size for the buffer up-front, and pre-allocate its
-		 * page tables.
-		 *
-		 * Skip page sizes that can't support needed domains.
-		 */
-		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
-		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
-			continue;
-		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
-		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
-			continue;
 
-		/* Select this page size if it's the first that supports
-		 * the potential memory domains, or when it's compatible
-		 * with the requested compression settings.
-		 */
-		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
-			pi = i;
-
-		/* Stop once the buffer is larger than the current page size. */
-		if (*size >= 1ULL << vmm->page[i].shift)
-			break;
-	}
+		/* Disable compression if suitable settings couldn't be found. */
+		if (nvbo->comp && !vmm->page[pi].comp) {
+			if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+				nvbo->kind = mmu->kind[nvbo->kind];
+			nvbo->comp = 0;
+		}
+		nvbo->page = vmm->page[pi].shift;
+	} else {
+		/* reject other tile flags when in VM mode. */
+		if (tile_mode)
+			return ERR_PTR(-EINVAL);
+		if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
+			return ERR_PTR(-EINVAL);
 
-	if (WARN_ON(pi < 0)) {
-		kfree(nvbo);
-		return ERR_PTR(-EINVAL);
-	}
+		/* Determine the desirable target GPU page size for the buffer. */
+		for (i = 0; i < vmm->page_nr; i++) {
+			/* Because we cannot currently allow VMM maps to fail
+			 * during buffer migration, we need to determine page
+			 * size for the buffer up-front, and pre-allocate its
+			 * page tables.
+			 *
+			 * Skip page sizes that can't support needed domains.
+			 */
+			if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+				continue;
+			if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+			    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+				continue;
 
-	/* Disable compression if suitable settings couldn't be found. */
-	if (nvbo->comp && !vmm->page[pi].comp) {
-		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
-			nvbo->kind = mmu->kind[nvbo->kind];
-		nvbo->comp = 0;
+			if (pi < 0)
+				pi = i;
+			/* Stop once the buffer is larger than the current page size. */
+			if (*size >= 1ULL << vmm->page[i].shift)
+				break;
+		}
+		if (WARN_ON(pi < 0)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+		nvbo->page = vmm->page[pi].shift;
 	}
-	nvbo->page = vmm->page[pi].shift;
 
 	nouveau_bo_fixup_align(nvbo, align, size);
 
@@ -306,18 +342,26 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
 {
 	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
 	int ret;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false,
+		.resv = robj,
+	};
 
 	nouveau_bo_placement_set(nvbo, domain, 0);
 	INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
-	ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
-				   &nvbo->placement, align >> PAGE_SHIFT, false,
+	ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
				   &nvbo->placement, align >> PAGE_SHIFT, &ctx,
 				   sg, robj, nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
 
+	if (!robj)
+		ttm_bo_unreserve(&nvbo->bo);
+
 	return 0;
 }
 
@@ -331,7 +375,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	int ret;
 
 	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
-				tile_flags);
+				tile_flags, true);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
 
@@ -339,6 +383,11 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	dma_resv_init(&nvbo->bo.base._resv);
 	drm_vma_node_reset(&nvbo->bo.base.vma_node);
 
+	/* This must be called before ttm_bo_init_reserved(). Subsequent
+	 * bo_move() callbacks might already iterate the GEMs GPUVA list.
+	 */
+	drm_gem_gpuva_init(&nvbo->bo.base);
+
 	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
 	if (ret)
 		return ret;
@@ -817,29 +866,39 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 		mutex_lock(&cli->mutex);
 	else
 		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+
 	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
-	if (ret == 0) {
-		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
-		if (ret == 0) {
-			ret = nouveau_fence_new(chan, false, &fence);
-			if (ret == 0) {
-				/* TODO: figure out a better solution here
-				 *
-				 * wait on the fence here explicitly as going through
-				 * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
-				 *
-				 * Without this the operation can timeout and we'll fallback to a
-				 * software copy, which might take several minutes to finish.
-				 */
-				nouveau_fence_wait(fence, false, false);
-				ret = ttm_bo_move_accel_cleanup(bo,
-								&fence->base,
-								evict, false,
-								new_reg);
-				nouveau_fence_unref(&fence);
-			}
-		}
+	if (ret)
+		goto out_unlock;
+
+	ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
+	if (ret)
+		goto out_unlock;
+
+	ret = nouveau_fence_new(&fence);
+	if (ret)
+		goto out_unlock;
+
+	ret = nouveau_fence_emit(fence, chan);
+	if (ret) {
+		nouveau_fence_unref(&fence);
+		goto out_unlock;
 	}
+
+	/* TODO: figure out a better solution here
+	 *
+	 * wait on the fence here explicitly as going through
+	 * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+	 *
+	 * Without this the operation can timeout and we'll fallback to a
+	 * software copy, which might take several minutes to finish.
+	 */
+	nouveau_fence_wait(fence, false, false);
+	ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
+					new_reg);
+	nouveau_fence_unref(&fence);
+
+out_unlock:
 	mutex_unlock(&cli->mutex);
 	return ret;
 }
 
@@ -935,6 +994,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 		list_for_each_entry(vma, &nvbo->vma_list, head) {
 			nouveau_vma_map(vma, mem);
 		}
+		nouveau_uvmm_bo_map_all(nvbo, mem);
 	} else {
 		list_for_each_entry(vma, &nvbo->vma_list, head) {
 			ret = dma_resv_wait_timeout(bo->base.resv,
@@ -943,6 +1003,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 			WARN_ON(ret <= 0);
 			nouveau_vma_unmap(vma);
 		}
+		nouveau_uvmm_bo_unmap_all(nvbo);
 	}
 
 	if (new_reg)