Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1098
1 file changed, 847 insertions(+), 251 deletions(-)
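The bulk of the patch below replaces amdgpu's single page-directory/page-table pair with a configurable multi-level hierarchy (amdgpu_vm_num_entries, amdgpu_vm_alloc_levels, amdgpu_vm_update_level, amdgpu_vm_get_pt): the root directory covers max_pfn, and every level below it consumes block_size bits of the page-frame address. As a reading aid, here is a minimal standalone sketch of that per-level arithmetic; the struct and helper names are illustrative stand-ins, not the driver code itself.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the adev->vm_manager fields the patch uses. */
struct vm_manager {
	uint64_t max_pfn;    /* number of 4K GPU pages the VM covers       */
	unsigned block_size; /* address bits consumed per sub-level        */
	unsigned num_level;  /* number of levels below the root directory  */
};

/* Same shape as amdgpu_vm_num_entries(): the root covers everything the
 * lower levels do not, the leaves hold the fixed PTE count, and every
 * level in between holds 1 << block_size entries.
 */
static unsigned num_entries(const struct vm_manager *mgr, unsigned level,
			    unsigned pte_count)
{
	if (level == 0)
		return mgr->max_pfn >> (mgr->block_size * mgr->num_level);
	if (level == mgr->num_level)
		return pte_count;
	return 1u << mgr->block_size;
}

/* Index into a given level for a page-frame number, mirroring the shift
 * used by amdgpu_vm_alloc_levels()/amdgpu_vm_get_pt(): drop the bits that
 * belong to the lower levels, then wrap to this level's entry count.
 */
static unsigned level_index(const struct vm_manager *mgr, unsigned level,
			    uint64_t pfn, unsigned pte_count)
{
	unsigned shift = (mgr->num_level - level) * mgr->block_size;

	return (pfn >> shift) % num_entries(mgr, level, pte_count);
}

int main(void)
{
	/* Assumed example: 48-bit VA space (2^36 pages), 9 bits per level,
	 * three levels below the root, 512 PTEs per leaf table.
	 */
	struct vm_manager mgr = { .max_pfn = 1ull << 36,
				  .block_size = 9, .num_level = 3 };
	uint64_t pfn = 0x123456789ull;
	unsigned level;

	for (level = 0; level <= mgr.num_level; ++level)
		printf("level %u: %4u entries, index %u\n", level,
		       num_entries(&mgr, level, 512),
		       level_index(&mgr, level, pfn, 512));
	return 0;
}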
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bd0d33125c18..7ed5302b511a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -26,6 +26,7 @@ * Jerome Glisse */ #include <linux/dma-fence-array.h> +#include <linux/interval_tree_generic.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -51,12 +52,23 @@ * SI supports 16. */ +#define START(node) ((node)->start) +#define LAST(node) ((node)->last) + +INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last, + START, LAST, static, amdgpu_vm_it) + +#undef START +#undef LAST + /* Local structure. Encapsulate some VM table update parameters to reduce * the number of function parameters */ struct amdgpu_pte_update_params { /* amdgpu device we do this update for */ struct amdgpu_device *adev; + /* optional amdgpu_vm we do this update for */ + struct amdgpu_vm *vm; /* address where to copy page table entries from */ uint64_t src; /* indirect buffer to fill with commands */ @@ -64,33 +76,50 @@ struct amdgpu_pte_update_params { /* Function which actually does the update */ void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, - uint32_t flags); + uint64_t flags); /* indicate update pt or its shadow */ bool shadow; }; +/* Helper to disable partial resident texture feature from a fence callback */ +struct amdgpu_prt_cb { + struct amdgpu_device *adev; + struct dma_fence_cb cb; +}; + /** - * amdgpu_vm_num_pde - return the number of page directory entries + * amdgpu_vm_num_entries - return the number of entries in a PD/PT * * @adev: amdgpu_device pointer * - * Calculate the number of page directory entries. + * Calculate the number of entries in a page directory or page table. */ -static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) +static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, + unsigned level) { - return adev->vm_manager.max_pfn >> amdgpu_vm_block_size; + if (level == 0) + /* For the root directory */ + return adev->vm_manager.max_pfn >> + (adev->vm_manager.block_size * + adev->vm_manager.num_level); + else if (level == adev->vm_manager.num_level) + /* For the page tables on the leaves */ + return AMDGPU_VM_PTE_COUNT(adev); + else + /* Everything in between */ + return 1 << adev->vm_manager.block_size; } /** - * amdgpu_vm_directory_size - returns the size of the page directory in bytes + * amdgpu_vm_bo_size - returns the size of the BOs in bytes * * @adev: amdgpu_device pointer * - * Calculate the size of the page directory in bytes. + * Calculate the size of the BO for a page directory or page table in bytes. 
*/ -static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) +static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level) { - return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8); + return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8); } /** @@ -107,15 +136,56 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry) { - entry->robj = vm->page_directory; + entry->robj = vm->root.bo; entry->priority = 0; - entry->tv.bo = &vm->page_directory->tbo; + entry->tv.bo = &entry->robj->tbo; entry->tv.shared = true; entry->user_pages = NULL; list_add(&entry->tv.head, validated); } /** + * amdgpu_vm_validate_layer - validate a single page table level + * + * @parent: parent page table level + * @validate: callback to do the validation + * @param: parameter for the validation callback + * + * Validate the page table BOs on command submission if neccessary. + */ +static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, + int (*validate)(void *, struct amdgpu_bo *), + void *param) +{ + unsigned i; + int r; + + if (!parent->entries) + return 0; + + for (i = 0; i <= parent->last_entry_used; ++i) { + struct amdgpu_vm_pt *entry = &parent->entries[i]; + + if (!entry->bo) + continue; + + r = validate(param, entry->bo); + if (r) + return r; + + /* + * Recurse into the sub directory. This is harmless because we + * have only a maximum of 5 layers. + */ + r = amdgpu_vm_validate_level(entry, validate, param); + if (r) + return r; + } + + return r; +} + +/** * amdgpu_vm_validate_pt_bos - validate the page table BOs * * @adev: amdgpu device pointer @@ -130,8 +200,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { uint64_t num_evictions; - unsigned i; - int r; /* We only need to validate the page tables * if they aren't already valid. @@ -140,19 +208,33 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (num_evictions == vm->last_eviction_counter) return 0; - /* add the vm page table to the list */ - for (i = 0; i <= vm->max_pde_used; ++i) { - struct amdgpu_bo *bo = vm->page_tables[i].bo; + return amdgpu_vm_validate_level(&vm->root, validate, param); +} + +/** + * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail + * + * @adev: amdgpu device instance + * @vm: vm providing the BOs + * + * Move the PT BOs to the tail of the LRU. 
+ */ +static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent) +{ + unsigned i; + + if (!parent->entries) + return; + + for (i = 0; i <= parent->last_entry_used; ++i) { + struct amdgpu_vm_pt *entry = &parent->entries[i]; - if (!bo) + if (!entry->bo) continue; - r = validate(param, bo); - if (r) - return r; + ttm_bo_move_to_lru_tail(&entry->bo->tbo); + amdgpu_vm_move_level_in_lru(entry); } - - return 0; } /** @@ -167,25 +249,146 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct ttm_bo_global *glob = adev->mman.bdev.glob; - unsigned i; spin_lock(&glob->lru_lock); - for (i = 0; i <= vm->max_pde_used; ++i) { - struct amdgpu_bo *bo = vm->page_tables[i].bo; + amdgpu_vm_move_level_in_lru(&vm->root); + spin_unlock(&glob->lru_lock); +} - if (!bo) - continue; + /** + * amdgpu_vm_alloc_levels - allocate the PD/PT levels + * + * @adev: amdgpu_device pointer + * @vm: requested vm + * @saddr: start of the address range + * @eaddr: end of the address range + * + * Make sure the page directories and page tables are allocated + */ +static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_vm_pt *parent, + uint64_t saddr, uint64_t eaddr, + unsigned level) +{ + unsigned shift = (adev->vm_manager.num_level - level) * + adev->vm_manager.block_size; + unsigned pt_idx, from, to; + int r; + + if (!parent->entries) { + unsigned num_entries = amdgpu_vm_num_entries(adev, level); - ttm_bo_move_to_lru_tail(&bo->tbo); + parent->entries = drm_calloc_large(num_entries, + sizeof(struct amdgpu_vm_pt)); + if (!parent->entries) + return -ENOMEM; + memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt)); } - spin_unlock(&glob->lru_lock); + + from = saddr >> shift; + to = eaddr >> shift; + if (from >= amdgpu_vm_num_entries(adev, level) || + to >= amdgpu_vm_num_entries(adev, level)) + return -EINVAL; + + if (to > parent->last_entry_used) + parent->last_entry_used = to; + + ++level; + saddr = saddr & ((1 << shift) - 1); + eaddr = eaddr & ((1 << shift) - 1); + + /* walk over the address space and allocate the page tables */ + for (pt_idx = from; pt_idx <= to; ++pt_idx) { + struct reservation_object *resv = vm->root.bo->tbo.resv; + struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; + struct amdgpu_bo *pt; + + if (!entry->bo) { + r = amdgpu_bo_create(adev, + amdgpu_vm_bo_size(adev, level), + AMDGPU_GPU_PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + AMDGPU_GEM_CREATE_VRAM_CLEARED, + NULL, resv, &pt); + if (r) + return r; + + /* Keep a reference to the root directory to avoid + * freeing them up in the wrong order. + */ + pt->parent = amdgpu_bo_ref(vm->root.bo); + + entry->bo = pt; + entry->addr = 0; + } + + if (level < adev->vm_manager.num_level) { + uint64_t sub_saddr = (pt_idx == from) ? saddr : 0; + uint64_t sub_eaddr = (pt_idx == to) ? eaddr : + ((1 << shift) - 1); + r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr, + sub_eaddr, level); + if (r) + return r; + } + } + + return 0; +} + +/** + * amdgpu_vm_alloc_pts - Allocate page tables. + * + * @adev: amdgpu_device pointer + * @vm: VM to allocate page tables for + * @saddr: Start address which needs to be allocated + * @size: Size from start address we need. + * + * Make sure the page tables are allocated. 
+ */ +int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint64_t saddr, uint64_t size) +{ + uint64_t last_pfn; + uint64_t eaddr; + + /* validate the parameters */ + if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK) + return -EINVAL; + + eaddr = saddr + size - 1; + last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; + if (last_pfn >= adev->vm_manager.max_pfn) { + dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n", + last_pfn, adev->vm_manager.max_pfn); + return -EINVAL; + } + + saddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr /= AMDGPU_GPU_PAGE_SIZE; + + return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0); } -static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, - struct amdgpu_vm_id *id) +/** + * amdgpu_vm_had_gpu_reset - check if reset occured since last use + * + * @adev: amdgpu_device pointer + * @id: VMID structure + * + * Check if GPU reset occured since last use of the VMID. + */ +static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vm_id *id) { return id->current_gpu_reset_count != - atomic_read(&adev->gpu_reset_counter) ? true : false; + atomic_read(&adev->gpu_reset_counter); } /** @@ -271,7 +474,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Check all the prerequisites to using this VMID */ if (!id) continue; - if (amdgpu_vm_is_gpu_reset(adev, id)) + if (amdgpu_vm_had_gpu_reset(adev, id)) continue; if (atomic64_read(&id->owner) != vm->client_id) @@ -299,7 +502,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r) goto error; - id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); list_move_tail(&id->list, &adev->vm_manager.ids_lru); vm->ids[ring->idx] = id; @@ -320,9 +522,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r) goto error; - dma_fence_put(id->first); - id->first = dma_fence_get(fence); - dma_fence_put(id->last_flush); id->last_flush = NULL; @@ -369,6 +568,16 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) return false; } +static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr) +{ + u64 addr = mc_addr; + + if (adev->gart.gart_funcs->adjust_mc_addr) + addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr); + + return addr; +} + /** * amdgpu_vm_flush - hardware flush the vm * @@ -389,19 +598,31 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) id->gws_size != job->gws_size || id->oa_base != job->oa_base || id->oa_size != job->oa_size); + bool vm_flush_needed = job->vm_needs_flush || + amdgpu_vm_ring_has_compute_vm_bug(ring); + unsigned patch_offset = 0; int r; - if (ring->funcs->emit_pipeline_sync && ( - job->vm_needs_flush || gds_switch_needed || - amdgpu_vm_ring_has_compute_vm_bug(ring))) + if (amdgpu_vm_had_gpu_reset(adev, id)) { + gds_switch_needed = true; + vm_flush_needed = true; + } + + if (!vm_flush_needed && !gds_switch_needed) + return 0; + + if (ring->funcs->init_cond_exec) + patch_offset = amdgpu_ring_init_cond_exec(ring); + + if (ring->funcs->emit_pipeline_sync) amdgpu_ring_emit_pipeline_sync(ring); - if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || - amdgpu_vm_is_gpu_reset(adev, id))) { + if (ring->funcs->emit_vm_flush && vm_flush_needed) { + u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr); struct dma_fence *fence; - trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); - amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); + trace_amdgpu_vm_flush(pd_addr, 
ring->idx, job->vm_id); + amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr); r = amdgpu_fence_emit(ring, &fence); if (r) @@ -420,12 +641,20 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) id->gws_size = job->gws_size; id->oa_base = job->oa_base; id->oa_size = job->oa_size; - amdgpu_ring_emit_gds_switch(ring, job->vm_id, - job->gds_base, job->gds_size, - job->gws_base, job->gws_size, - job->oa_base, job->oa_size); + amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base, + job->gds_size, job->gws_base, + job->gws_size, job->oa_base, + job->oa_size); } + if (ring->funcs->patch_cond_exec) + amdgpu_ring_patch_cond_exec(ring, patch_offset); + + /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ + if (ring->funcs->emit_switch_buffer) { + amdgpu_ring_emit_switch_buffer(ring); + amdgpu_ring_emit_switch_buffer(ring); + } return 0; } @@ -490,7 +719,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, - uint32_t flags) + uint64_t flags) { trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); @@ -519,7 +748,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, - uint32_t flags) + uint64_t flags) { uint64_t src = (params->src + (addr >> 12) * 8); @@ -554,24 +783,24 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) } /* - * amdgpu_vm_update_pdes - make sure that page directory is valid + * amdgpu_vm_update_level - update a single level in the hierarchy * * @adev: amdgpu_device pointer * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range + * @parent: parent directory * - * Allocates new page tables if necessary - * and updates the page directory. + * Makes sure all entries in @parent are up to date. * Returns 0 for success, error for failure. */ -int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, - struct amdgpu_vm *vm) +static int amdgpu_vm_update_level(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_vm_pt *parent, + unsigned level) { struct amdgpu_bo *shadow; struct amdgpu_ring *ring; uint64_t pd_addr, shadow_addr; - uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; + uint32_t incr = amdgpu_vm_bo_size(adev, level + 1); uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0; unsigned count = 0, pt_idx, ndw; struct amdgpu_job *job; @@ -580,16 +809,19 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, int r; + if (!parent->entries) + return 0; ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - shadow = vm->page_directory->shadow; /* padding, etc. 
*/ ndw = 64; /* assume the worst case */ - ndw += vm->max_pde_used * 6; + ndw += parent->last_entry_used * 6; - pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); + pd_addr = amdgpu_bo_gpu_offset(parent->bo); + + shadow = parent->bo->shadow; if (shadow) { r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); if (r) @@ -608,9 +840,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, params.adev = adev; params.ib = &job->ibs[0]; - /* walk over the address space and update the page directory */ - for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { - struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; + /* walk over the address space and update the directory */ + for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { + struct amdgpu_bo *bo = parent->entries[pt_idx].bo; uint64_t pde, pt; if (bo == NULL) @@ -626,10 +858,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } pt = amdgpu_bo_gpu_offset(bo); - if (vm->page_tables[pt_idx].addr == pt) + if (parent->entries[pt_idx].addr == pt) continue; - vm->page_tables[pt_idx].addr = pt; + parent->entries[pt_idx].addr = pt; pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || @@ -637,15 +869,18 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { if (count) { + uint64_t pt_addr = + amdgpu_vm_adjust_mc_addr(adev, last_pt); + if (shadow) amdgpu_vm_do_set_ptes(¶ms, last_shadow, - last_pt, count, + pt_addr, count, incr, AMDGPU_PTE_VALID); amdgpu_vm_do_set_ptes(¶ms, last_pde, - last_pt, count, incr, + pt_addr, count, incr, AMDGPU_PTE_VALID); } @@ -659,36 +894,51 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) { - if (vm->page_directory->shadow) - amdgpu_vm_do_set_ptes(¶ms, last_shadow, last_pt, + uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt); + + if (vm->root.bo->shadow) + amdgpu_vm_do_set_ptes(¶ms, last_shadow, pt_addr, count, incr, AMDGPU_PTE_VALID); - amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, + amdgpu_vm_do_set_ptes(¶ms, last_pde, pt_addr, count, incr, AMDGPU_PTE_VALID); } if (params.ib->length_dw == 0) { amdgpu_job_free(job); - return 0; - } - - amdgpu_ring_pad_ib(ring, params.ib); - amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, - AMDGPU_FENCE_OWNER_VM); - if (shadow) - amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, + } else { + amdgpu_ring_pad_ib(ring, params.ib); + amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); + if (shadow) + amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, + AMDGPU_FENCE_OWNER_VM); - WARN_ON(params.ib->length_dw > ndw); - r = amdgpu_job_submit(job, ring, &vm->entity, - AMDGPU_FENCE_OWNER_VM, &fence); - if (r) - goto error_free; + WARN_ON(params.ib->length_dw > ndw); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &fence); + if (r) + goto error_free; - amdgpu_bo_fence(vm->page_directory, fence, true); - dma_fence_put(vm->page_directory_fence); - vm->page_directory_fence = dma_fence_get(fence); - dma_fence_put(fence); + amdgpu_bo_fence(parent->bo, fence, true); + dma_fence_put(vm->last_dir_update); + vm->last_dir_update = dma_fence_get(fence); + dma_fence_put(fence); + } + /* + * Recurse into the subdirectories. This recursion is harmless because + * we only have a maximum of 5 layers. 
+ */ + for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { + struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; + + if (!entry->bo) + continue; + + r = amdgpu_vm_update_level(adev, vm, entry, level + 1); + if (r) + return r; + } return 0; @@ -697,6 +947,47 @@ error_free: return r; } +/* + * amdgpu_vm_update_directories - make sure that all directories are valid + * + * @adev: amdgpu_device pointer + * @vm: requested vm + * + * Makes sure all directories are up to date. + * Returns 0 for success, error for failure. + */ +int amdgpu_vm_update_directories(struct amdgpu_device *adev, + struct amdgpu_vm *vm) +{ + return amdgpu_vm_update_level(adev, vm, &vm->root, 0); +} + +/** + * amdgpu_vm_find_pt - find the page table for an address + * + * @p: see amdgpu_pte_update_params definition + * @addr: virtual address in question + * + * Find the page table BO for a virtual address, return NULL when none found. + */ +static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p, + uint64_t addr) +{ + struct amdgpu_vm_pt *entry = &p->vm->root; + unsigned idx, level = p->adev->vm_manager.num_level; + + while (entry->entries) { + idx = addr >> (p->adev->vm_manager.block_size * level--); + idx %= amdgpu_bo_size(entry->bo) / 8; + entry = &entry->entries[idx]; + } + + if (level) + return NULL; + + return entry->bo; +} + /** * amdgpu_vm_update_ptes - make sure that page tables are valid * @@ -710,23 +1001,26 @@ error_free: * Update the page tables in the range @start - @end. */ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, - struct amdgpu_vm *vm, uint64_t start, uint64_t end, - uint64_t dst, uint32_t flags) + uint64_t dst, uint64_t flags) { - const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; + struct amdgpu_device *adev = params->adev; + const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1; uint64_t cur_pe_start, cur_nptes, cur_dst; uint64_t addr; /* next GPU address to be updated */ - uint64_t pt_idx; struct amdgpu_bo *pt; unsigned nptes; /* next number of ptes to be updated */ uint64_t next_pe_start; /* initialize the variables */ addr = start; - pt_idx = addr >> amdgpu_vm_block_size; - pt = vm->page_tables[pt_idx].bo; + pt = amdgpu_vm_get_pt(params, addr); + if (!pt) { + pr_err("PT not found, aborting update_ptes\n"); + return; + } + if (params->shadow) { if (!pt->shadow) return; @@ -735,7 +1029,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else - nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); cur_pe_start = amdgpu_bo_gpu_offset(pt); cur_pe_start += (addr & mask) * 8; @@ -748,8 +1042,12 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, /* walk over the address space and update the page tables */ while (addr < end) { - pt_idx = addr >> amdgpu_vm_block_size; - pt = vm->page_tables[pt_idx].bo; + pt = amdgpu_vm_get_pt(params, addr); + if (!pt) { + pr_err("PT not found, aborting update_ptes\n"); + return; + } + if (params->shadow) { if (!pt->shadow) return; @@ -759,7 +1057,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else - nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); next_pe_start = amdgpu_bo_gpu_offset(pt); next_pe_start += (addr & mask) * 8; @@ -800,9 +1098,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, * 
@flags: hw mapping flags */ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, - struct amdgpu_vm *vm, uint64_t start, uint64_t end, - uint64_t dst, uint32_t flags) + uint64_t dst, uint64_t flags) { /** * The MC L1 TLB supports variable sized pages, based on a fragment @@ -834,25 +1131,25 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, if (params->src || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { - amdgpu_vm_update_ptes(params, vm, start, end, dst, flags); + amdgpu_vm_update_ptes(params, start, end, dst, flags); return; } /* handle the 4K area at the beginning */ if (start != frag_start) { - amdgpu_vm_update_ptes(params, vm, start, frag_start, + amdgpu_vm_update_ptes(params, start, frag_start, dst, flags); dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE; } /* handle the area in the middle */ - amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst, + amdgpu_vm_update_ptes(params, frag_start, frag_end, dst, flags | frag_flags); /* handle the 4K area at the end */ if (frag_end != end) { dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE; - amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags); + amdgpu_vm_update_ptes(params, frag_end, end, dst, flags); } } @@ -879,7 +1176,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, dma_addr_t *pages_addr, struct amdgpu_vm *vm, uint64_t start, uint64_t last, - uint32_t flags, uint64_t addr, + uint64_t flags, uint64_t addr, struct dma_fence **fence) { struct amdgpu_ring *ring; @@ -892,14 +1189,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; + params.vm = vm; params.src = src; ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - memset(¶ms, 0, sizeof(params)); - params.adev = adev; - params.src = src; - /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) owner = AMDGPU_FENCE_OWNER_UNDEFINED; @@ -910,7 +1204,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, * reserve space for one command every (1 << BLOCK_SIZE) * entries or 2k dwords (whatever is smaller) */ - ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1; + ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1; /* padding, etc. 
*/ ndw = 64; @@ -967,19 +1261,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, + r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv, owner); if (r) goto error_free; - r = reservation_object_reserve_shared(vm->page_directory->tbo.resv); + r = reservation_object_reserve_shared(vm->root.bo->tbo.resv); if (r) goto error_free; params.shadow = true; - amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); + amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); params.shadow = false; - amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); + amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); amdgpu_ring_pad_ib(ring, params.ib); WARN_ON(params.ib->length_dw > ndw); @@ -988,12 +1282,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_bo_fence(vm->page_directory, f, true); - if (fence) { - dma_fence_put(*fence); - *fence = dma_fence_get(f); - } - dma_fence_put(f); + amdgpu_bo_fence(vm->root.bo, f, true); + dma_fence_put(*fence); + *fence = f; return 0; error_free: @@ -1020,15 +1311,15 @@ error_free: */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct dma_fence *exclusive, - uint32_t gtt_flags, + uint64_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, - uint32_t flags, + uint64_t flags, struct drm_mm_node *nodes, struct dma_fence **fence) { - uint64_t pfn, src = 0, start = mapping->it.start; + uint64_t pfn, src = 0, start = mapping->start; int r; /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here @@ -1039,6 +1330,12 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) flags &= ~AMDGPU_PTE_WRITEABLE; + flags &= ~AMDGPU_PTE_EXECUTABLE; + flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + + flags &= ~AMDGPU_PTE_MTYPE_MASK; + flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK); + trace_amdgpu_vm_bo_update(mapping); pfn = mapping->offset >> PAGE_SHIFT; @@ -1074,7 +1371,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } addr += pfn << PAGE_SHIFT; - last = min((uint64_t)mapping->it.last, start + max_entries - 1); + last = min((uint64_t)mapping->last, start + max_entries - 1); r = amdgpu_vm_bo_update_mapping(adev, exclusive, src, pages_addr, vm, start, last, flags, addr, @@ -1089,7 +1386,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } start = last + 1; - } while (unlikely(start != mapping->it.last + 1)); + } while (unlikely(start != mapping->last + 1)); return 0; } @@ -1111,13 +1408,13 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_vm *vm = bo_va->vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; - uint32_t gtt_flags, flags; + uint64_t gtt_flags, flags; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; struct dma_fence *exclusive; int r; - if (clear) { + if (clear || !bo_va->bo) { mem = NULL; nodes = NULL; exclusive = NULL; @@ -1134,9 +1431,15 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); } - flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); - gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && - adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? 
flags : 0; + if (bo_va->bo) { + flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); + gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && + adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? + flags : 0; + } else { + flags = 0x0; + gtt_flags = ~0x0; + } spin_lock(&vm->status_lock); if (!list_empty(&bo_va->vm_status)) @@ -1171,10 +1474,142 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } /** + * amdgpu_vm_update_prt_state - update the global PRT state + */ +static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev) +{ + unsigned long flags; + bool enable; + + spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); + enable = !!atomic_read(&adev->vm_manager.num_prt_users); + adev->gart.gart_funcs->set_prt(adev, enable); + spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); +} + +/** + * amdgpu_vm_prt_get - add a PRT user + */ +static void amdgpu_vm_prt_get(struct amdgpu_device *adev) +{ + if (!adev->gart.gart_funcs->set_prt) + return; + + if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) + amdgpu_vm_update_prt_state(adev); +} + +/** + * amdgpu_vm_prt_put - drop a PRT user + */ +static void amdgpu_vm_prt_put(struct amdgpu_device *adev) +{ + if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) + amdgpu_vm_update_prt_state(adev); +} + +/** + * amdgpu_vm_prt_cb - callback for updating the PRT status + */ +static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb) +{ + struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb); + + amdgpu_vm_prt_put(cb->adev); + kfree(cb); +} + +/** + * amdgpu_vm_add_prt_cb - add callback for updating the PRT status + */ +static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev, + struct dma_fence *fence) +{ + struct amdgpu_prt_cb *cb; + + if (!adev->gart.gart_funcs->set_prt) + return; + + cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL); + if (!cb) { + /* Last resort when we are OOM */ + if (fence) + dma_fence_wait(fence, false); + + amdgpu_vm_prt_put(adev); + } else { + cb->adev = adev; + if (!fence || dma_fence_add_callback(fence, &cb->cb, + amdgpu_vm_prt_cb)) + amdgpu_vm_prt_cb(fence, &cb->cb); + } +} + +/** + * amdgpu_vm_free_mapping - free a mapping + * + * @adev: amdgpu_device pointer + * @vm: requested vm + * @mapping: mapping to be freed + * @fence: fence of the unmap operation + * + * Free a mapping and make sure we decrease the PRT usage count if applicable. + */ +static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_bo_va_mapping *mapping, + struct dma_fence *fence) +{ + if (mapping->flags & AMDGPU_PTE_PRT) + amdgpu_vm_add_prt_cb(adev, fence); + kfree(mapping); +} + +/** + * amdgpu_vm_prt_fini - finish all prt mappings + * + * @adev: amdgpu_device pointer + * @vm: requested vm + * + * Register a cleanup callback to disable PRT support after VM dies. + */ +static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) +{ + struct reservation_object *resv = vm->root.bo->tbo.resv; + struct dma_fence *excl, **shared; + unsigned i, shared_count; + int r; + + r = reservation_object_get_fences_rcu(resv, &excl, + &shared_count, &shared); + if (r) { + /* Not enough memory to grab the fence list, as last resort + * block for all the fences to complete. 
+ */ + reservation_object_wait_timeout_rcu(resv, true, false, + MAX_SCHEDULE_TIMEOUT); + return; + } + + /* Add a callback for each fence in the reservation object */ + amdgpu_vm_prt_get(adev); + amdgpu_vm_add_prt_cb(adev, excl); + + for (i = 0; i < shared_count; ++i) { + amdgpu_vm_prt_get(adev); + amdgpu_vm_add_prt_cb(adev, shared[i]); + } + + kfree(shared); +} + +/** * amdgpu_vm_clear_freed - clear freed BOs in the PT * * @adev: amdgpu_device pointer * @vm: requested vm + * @fence: optional resulting fence (unchanged if no work needed to be done + * or if an error occurred) * * Make sure all freed BOs are cleared in the PT. * Returns 0 for success. @@ -1182,9 +1617,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, * PTs have to be reserved and mutex must be locked! */ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, - struct amdgpu_vm *vm) + struct amdgpu_vm *vm, + struct dma_fence **fence) { struct amdgpu_bo_va_mapping *mapping; + struct dma_fence *f = NULL; int r; while (!list_empty(&vm->freed)) { @@ -1193,12 +1630,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, list_del(&mapping->list); r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping, - 0, 0, NULL); - kfree(mapping); - if (r) + 0, 0, &f); + amdgpu_vm_free_mapping(adev, vm, mapping, f); + if (r) { + dma_fence_put(f); return r; + } + } + if (fence && f) { + dma_fence_put(*fence); + *fence = f; + } else { + dma_fence_put(f); } + return 0; } @@ -1271,7 +1717,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, INIT_LIST_HEAD(&bo_va->invalids); INIT_LIST_HEAD(&bo_va->vm_status); - list_add_tail(&bo_va->bo_list, &bo->va); + if (bo) + list_add_tail(&bo_va->bo_list, &bo->va); return bo_va; } @@ -1295,12 +1742,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t saddr, uint64_t offset, uint64_t size, uint64_t flags) { - struct amdgpu_bo_va_mapping *mapping; + struct amdgpu_bo_va_mapping *mapping, *tmp; struct amdgpu_vm *vm = bo_va->vm; - struct interval_tree_node *it; - unsigned last_pfn, pt_idx; uint64_t eaddr; - int r; /* validate the parameters */ if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || @@ -1309,93 +1753,103 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* make sure object fit at this offset */ eaddr = saddr + size - 1; - if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) - return -EINVAL; - - last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; - if (last_pfn >= adev->vm_manager.max_pfn) { - dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n", - last_pfn, adev->vm_manager.max_pfn); + if (saddr >= eaddr || + (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) return -EINVAL; - } saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; - it = interval_tree_iter_first(&vm->va, saddr, eaddr); - if (it) { - struct amdgpu_bo_va_mapping *tmp; - tmp = container_of(it, struct amdgpu_bo_va_mapping, it); + tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); + if (tmp) { /* bo and tmp overlap, invalid addr */ dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " - "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, - tmp->it.start, tmp->it.last + 1); - r = -EINVAL; - goto error; + "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr, + tmp->start, tmp->last + 1); + return -EINVAL; } mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); - if (!mapping) { - r = -ENOMEM; - goto error; - } + if (!mapping) + return -ENOMEM; INIT_LIST_HEAD(&mapping->list); - mapping->it.start = saddr; - mapping->it.last = eaddr; + mapping->start = saddr; 
+ mapping->last = eaddr; mapping->offset = offset; mapping->flags = flags; list_add(&mapping->list, &bo_va->invalids); - interval_tree_insert(&mapping->it, &vm->va); - - /* Make sure the page tables are allocated */ - saddr >>= amdgpu_vm_block_size; - eaddr >>= amdgpu_vm_block_size; + amdgpu_vm_it_insert(mapping, &vm->va); - BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev)); + if (flags & AMDGPU_PTE_PRT) + amdgpu_vm_prt_get(adev); - if (eaddr > vm->max_pde_used) - vm->max_pde_used = eaddr; + return 0; +} - /* walk over the address space and allocate the page tables */ - for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { - struct reservation_object *resv = vm->page_directory->tbo.resv; - struct amdgpu_bo *pt; +/** + * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings + * + * @adev: amdgpu_device pointer + * @bo_va: bo_va to store the address + * @saddr: where to map the BO + * @offset: requested offset in the BO + * @flags: attributes of pages (read/write/valid/etc.) + * + * Add a mapping of the BO at the specefied addr into the VM. Replace existing + * mappings as we do so. + * Returns 0 for success, error for failure. + * + * Object has to be reserved and unreserved outside! + */ +int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + uint64_t saddr, uint64_t offset, + uint64_t size, uint64_t flags) +{ + struct amdgpu_bo_va_mapping *mapping; + struct amdgpu_vm *vm = bo_va->vm; + uint64_t eaddr; + int r; - if (vm->page_tables[pt_idx].bo) - continue; + /* validate the parameters */ + if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || + size == 0 || size & AMDGPU_GPU_PAGE_MASK) + return -EINVAL; - r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, - AMDGPU_GPU_PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS | - AMDGPU_GEM_CREATE_SHADOW | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED, - NULL, resv, &pt); - if (r) - goto error_free; + /* make sure object fit at this offset */ + eaddr = saddr + size - 1; + if (saddr >= eaddr || + (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) + return -EINVAL; - /* Keep a reference to the page table to avoid freeing - * them up in the wrong order. 
- */ - pt->parent = amdgpu_bo_ref(vm->page_directory); + /* Allocate all the needed memory */ + mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) + return -ENOMEM; - vm->page_tables[pt_idx].bo = pt; - vm->page_tables[pt_idx].addr = 0; + r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size); + if (r) { + kfree(mapping); + return r; } - return 0; + saddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr /= AMDGPU_GPU_PAGE_SIZE; -error_free: - list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); - trace_amdgpu_vm_bo_unmap(bo_va, mapping); - kfree(mapping); + mapping->start = saddr; + mapping->last = eaddr; + mapping->offset = offset; + mapping->flags = flags; -error: - return r; + list_add(&mapping->list, &bo_va->invalids); + amdgpu_vm_it_insert(mapping, &vm->va); + + if (flags & AMDGPU_PTE_PRT) + amdgpu_vm_prt_get(adev); + + return 0; } /** @@ -1421,7 +1875,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, saddr /= AMDGPU_GPU_PAGE_SIZE; list_for_each_entry(mapping, &bo_va->valids, list) { - if (mapping->it.start == saddr) + if (mapping->start == saddr) break; } @@ -1429,7 +1883,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, valid = false; list_for_each_entry(mapping, &bo_va->invalids, list) { - if (mapping->it.start == saddr) + if (mapping->start == saddr) break; } @@ -1438,13 +1892,113 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, } list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); + amdgpu_vm_it_remove(mapping, &vm->va); trace_amdgpu_vm_bo_unmap(bo_va, mapping); if (valid) list_add(&mapping->list, &vm->freed); else - kfree(mapping); + amdgpu_vm_free_mapping(adev, vm, mapping, + bo_va->last_pt_update); + + return 0; +} + +/** + * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range + * + * @adev: amdgpu_device pointer + * @vm: VM structure to use + * @saddr: start of the range + * @size: size of the range + * + * Remove all mappings in a range, split them as appropriate. + * Returns 0 for success, error for failure. 
+ */ +int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint64_t saddr, uint64_t size) +{ + struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; + LIST_HEAD(removed); + uint64_t eaddr; + + eaddr = saddr + size - 1; + saddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr /= AMDGPU_GPU_PAGE_SIZE; + + /* Allocate all the needed memory */ + before = kzalloc(sizeof(*before), GFP_KERNEL); + if (!before) + return -ENOMEM; + INIT_LIST_HEAD(&before->list); + + after = kzalloc(sizeof(*after), GFP_KERNEL); + if (!after) { + kfree(before); + return -ENOMEM; + } + INIT_LIST_HEAD(&after->list); + + /* Now gather all removed mappings */ + tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); + while (tmp) { + /* Remember mapping split at the start */ + if (tmp->start < saddr) { + before->start = tmp->start; + before->last = saddr - 1; + before->offset = tmp->offset; + before->flags = tmp->flags; + list_add(&before->list, &tmp->list); + } + + /* Remember mapping split at the end */ + if (tmp->last > eaddr) { + after->start = eaddr + 1; + after->last = tmp->last; + after->offset = tmp->offset; + after->offset += after->start - tmp->start; + after->flags = tmp->flags; + list_add(&after->list, &tmp->list); + } + + list_del(&tmp->list); + list_add(&tmp->list, &removed); + + tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr); + } + + /* And free them up */ + list_for_each_entry_safe(tmp, next, &removed, list) { + amdgpu_vm_it_remove(tmp, &vm->va); + list_del(&tmp->list); + + if (tmp->start < saddr) + tmp->start = saddr; + if (tmp->last > eaddr) + tmp->last = eaddr; + + list_add(&tmp->list, &vm->freed); + trace_amdgpu_vm_bo_unmap(NULL, tmp); + } + + /* Insert partial mapping before the range */ + if (!list_empty(&before->list)) { + amdgpu_vm_it_insert(before, &vm->va); + if (before->flags & AMDGPU_PTE_PRT) + amdgpu_vm_prt_get(adev); + } else { + kfree(before); + } + + /* Insert partial mapping after the range */ + if (!list_empty(&after->list)) { + amdgpu_vm_it_insert(after, &vm->va); + if (after->flags & AMDGPU_PTE_PRT) + amdgpu_vm_prt_get(adev); + } else { + kfree(after); + } return 0; } @@ -1473,14 +2027,15 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); + amdgpu_vm_it_remove(mapping, &vm->va); trace_amdgpu_vm_bo_unmap(bo_va, mapping); list_add(&mapping->list, &vm->freed); } list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); - kfree(mapping); + amdgpu_vm_it_remove(mapping, &vm->va); + amdgpu_vm_free_mapping(adev, vm, mapping, + bo_va->last_pt_update); } dma_fence_put(bo_va->last_pt_update); @@ -1509,6 +2064,44 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, } } +static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) +{ + /* Total bits covered by PD + PTs */ + unsigned bits = ilog2(vm_size) + 18; + + /* Make sure the PD is 4K in size up to 8GB address space. 
+ Above that split equal between PD and PTs */ + if (vm_size <= 8) + return (bits - 9); + else + return ((bits + 3) / 2); +} + +/** + * amdgpu_vm_adjust_size - adjust vm size and block size + * + * @adev: amdgpu_device pointer + * @vm_size: the default vm size if it's set auto + */ +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size) +{ + /* adjust vm size firstly */ + if (amdgpu_vm_size == -1) + adev->vm_manager.vm_size = vm_size; + else + adev->vm_manager.vm_size = amdgpu_vm_size; + + /* block size depends on vm size */ + if (amdgpu_vm_block_size == -1) + adev->vm_manager.block_size = + amdgpu_vm_get_block_size(adev->vm_manager.vm_size); + else + adev->vm_manager.block_size = amdgpu_vm_block_size; + + DRM_INFO("vm size is %llu GB, block size is %u-bit\n", + adev->vm_manager.vm_size, adev->vm_manager.block_size); +} + /** * amdgpu_vm_init - initialize a vm instance * @@ -1520,8 +2113,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) { const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, - AMDGPU_VM_PTE_COUNT * 8); - unsigned pd_size, pd_entries; + AMDGPU_VM_PTE_COUNT(adev) * 8); unsigned ring_instance; struct amdgpu_ring *ring; struct amd_sched_rq *rq; @@ -1536,16 +2128,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); - pd_size = amdgpu_vm_directory_size(adev); - pd_entries = amdgpu_vm_num_pdes(adev); - - /* allocate page table array */ - vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt)); - if (vm->page_tables == NULL) { - DRM_ERROR("Cannot allocate memory for page table array\n"); - return -ENOMEM; - } - /* create scheduler entity for page table updates */ ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); @@ -1555,44 +2137,64 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) r = amd_sched_entity_init(&ring->sched, &vm->entity, rq, amdgpu_sched_jobs); if (r) - goto err; + return r; - vm->page_directory_fence = NULL; + vm->last_dir_update = NULL; - r = amdgpu_bo_create(adev, pd_size, align, true, + r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_SHADOW | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_VRAM_CLEARED, - NULL, NULL, &vm->page_directory); + NULL, NULL, &vm->root.bo); if (r) goto error_free_sched_entity; - r = amdgpu_bo_reserve(vm->page_directory, false); + r = amdgpu_bo_reserve(vm->root.bo, false); if (r) - goto error_free_page_directory; + goto error_free_root; vm->last_eviction_counter = atomic64_read(&adev->num_evictions); - amdgpu_bo_unreserve(vm->page_directory); + amdgpu_bo_unreserve(vm->root.bo); return 0; -error_free_page_directory: - amdgpu_bo_unref(&vm->page_directory->shadow); - amdgpu_bo_unref(&vm->page_directory); - vm->page_directory = NULL; +error_free_root: + amdgpu_bo_unref(&vm->root.bo->shadow); + amdgpu_bo_unref(&vm->root.bo); + vm->root.bo = NULL; error_free_sched_entity: amd_sched_entity_fini(&ring->sched, &vm->entity); -err: - drm_free_large(vm->page_tables); - return r; } /** + * amdgpu_vm_free_levels - free PD/PT levels + * + * @level: PD/PT starting level to free + * + * Free the page directory or page table level and all sub levels. 
+ */ +static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level) +{ + unsigned i; + + if (level->bo) { + amdgpu_bo_unref(&level->bo->shadow); + amdgpu_bo_unref(&level->bo); + } + + if (level->entries) + for (i = 0; i <= level->last_entry_used; i++) + amdgpu_vm_free_levels(&level->entries[i]); + + drm_free_large(level->entries); +} + +/** * amdgpu_vm_fini - tear down a vm instance * * @adev: amdgpu_device pointer @@ -1604,37 +2206,30 @@ err: void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_bo_va_mapping *mapping, *tmp; - int i; + bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; amd_sched_entity_fini(vm->entity.sched, &vm->entity); if (!RB_EMPTY_ROOT(&vm->va)) { dev_err(adev->dev, "still active bo inside vm\n"); } - rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { + rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) { list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); + amdgpu_vm_it_remove(mapping, &vm->va); kfree(mapping); } list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { - list_del(&mapping->list); - kfree(mapping); - } - - for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { - struct amdgpu_bo *pt = vm->page_tables[i].bo; - - if (!pt) - continue; + if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { + amdgpu_vm_prt_fini(adev, vm); + prt_fini_needed = false; + } - amdgpu_bo_unref(&pt->shadow); - amdgpu_bo_unref(&pt); + list_del(&mapping->list); + amdgpu_vm_free_mapping(adev, vm, mapping, NULL); } - drm_free_large(vm->page_tables); - amdgpu_bo_unref(&vm->page_directory->shadow); - amdgpu_bo_unref(&vm->page_directory); - dma_fence_put(vm->page_directory_fence); + amdgpu_vm_free_levels(&vm->root); + dma_fence_put(vm->last_dir_update); } /** @@ -1665,6 +2260,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); atomic64_set(&adev->vm_manager.client_counter, 0); + spin_lock_init(&adev->vm_manager.prt_lock); + atomic_set(&adev->vm_manager.num_prt_users, 0); } /** @@ -1681,7 +2278,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_NUM_VM; ++i) { struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; - dma_fence_put(adev->vm_manager.ids[i].first); amdgpu_sync_free(&adev->vm_manager.ids[i].active); dma_fence_put(id->flushed_updates); dma_fence_put(id->last_flush); |
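The patch also derives the per-level block size from the VM size at init time (amdgpu_vm_adjust_size calling amdgpu_vm_get_block_size) when the amdgpu_vm_block_size module parameter is left at auto. A minimal standalone sketch of that heuristic, again with illustrative names and assumed example sizes rather than the driver code itself:

#include <stdint.h>
#include <stdio.h>

/* Rough stand-in for the kernel's ilog2() used in this example. */
static unsigned ilog2_u64(uint64_t v)
{
	unsigned r = 0;

	while (v >>= 1)
		++r;
	return r;
}

/* Mirrors amdgpu_vm_get_block_size(): vm_size is in GB, so the VM spans
 * ilog2(vm_size) + 18 bits of 4K pages (1 GB = 2^18 pages).  Up to 8 GB
 * the page directory is kept at 4K (512 eight-byte entries, i.e. 9 bits
 * of index), so the page tables cover the remaining bits - 9; above that
 * the bits are split roughly evenly, biasing toward the page tables.
 */
static unsigned block_size_for(uint64_t vm_size_gb)
{
	unsigned bits = ilog2_u64(vm_size_gb) + 18;

	if (vm_size_gb <= 8)
		return bits - 9;
	return (bits + 3) / 2;
}

int main(void)
{
	uint64_t sizes[] = { 4, 8, 64, 256 };
	unsigned i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i)
		printf("vm_size %4llu GB -> block_size %u bits\n",
		       (unsigned long long)sizes[i],
		       block_size_for(sizes[i]));
	return 0;
}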