author    Felix Kuehling <Felix.Kuehling@amd.com>    2021-03-17 07:24:12 +0300
committer Alex Deucher <alexander.deucher@amd.com>   2021-04-21 04:48:43 +0300
commit    48ff079b28d82dbce000cc45c0fd35b6ae9ffbda (patch)
tree      02ff15ece3c28e3af18c2b9ded1acf8cb41420eb /drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
parent    0b0e518d61af8e1cb73cbbfb313b215640c8a6f3 (diff)
download  linux-48ff079b28d82dbce000cc45c0fd35b6ae9ffbda.tar.xz
drm/amdkfd: HMM migrate vram to ram
When a CPU page fault occurs, the HMM pgmap_ops callback migrate_to_ram migrates memory from VRAM to system RAM in the following steps:

1. migrate_vma_setup collects the VRAM pages and notifies HMM to invalidate them; the HMM interval notifier callback evicts the process queues.
2. Allocate system memory pages.
3. Use the SVM copy-memory helper to copy the data from VRAM to RAM.
4. migrate_vma_pages copies the page structures from the VRAM pages to the RAM pages.
5. Return VM_FAULT_SIGBUS if the migration failed, to notify the application.
6. migrate_vma_finalize puts the VRAM pages; the page_free callback frees the VRAM pages and VRAM nodes.
7. The restore work waits for the migration to finish, then updates the GPU page table mapping to system memory and resumes the process queues.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
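For orientation, a minimal sketch (not part of the patch) of the three-phase migrate_vma flow the steps above describe; the names vma, start, end, adev, src_pfns, dst_pfns and the helper svm_copy_and_alloc_to_ram() are assumed for illustration, and error handling is elided:

	struct migrate_vma migrate = {
		.vma         = vma,
		.start       = start,
		.end         = end,
		.src         = src_pfns,
		.dst         = dst_pfns,
		.flags       = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.pgmap_owner = adev,
	};

	/* Step 1: collect the device-private pages and invalidate the CPU
	 * mappings; the HMM interval notifier fires here and evicts the
	 * process queues.
	 */
	if (migrate_vma_setup(&migrate))
		return VM_FAULT_SIGBUS;

	if (migrate.cpages) {
		/* Steps 2-3: allocate system pages and copy data vram->ram
		 * (hypothetical helper standing in for svm_migrate_copy_to_ram)
		 */
		svm_copy_and_alloc_to_ram(adev, &migrate);
		/* Step 4: install the new struct pages into the page tables */
		migrate_vma_pages(&migrate);
	}
	/* Step 6: drop references to the vram pages; page_free() frees them */
	migrate_vma_finalize(&migrate);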
Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_migrate.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c  302
1 file changed, 299 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 7b025c169935..73c10dad0489 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -191,7 +191,7 @@ out_unlock:
* 0 - success
* otherwise - error code from dma fence signal
*/
-int
+static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
int r = 0;
@@ -260,6 +260,35 @@ svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
put_page(page);
}
+static unsigned long
+svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
+{
+ unsigned long addr;
+
+ addr = page_to_pfn(page) << PAGE_SHIFT;
+ return (addr - adev->kfd.dev->pgmap.range.start);
+}
+
+static struct page *
+svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct page *page;
+
+ page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+ if (page)
+ lock_page(page);
+
+ return page;
+}
+
+void svm_migrate_put_sys_page(unsigned long addr)
+{
+ struct page *page;
+
+ page = pfn_to_page(addr >> PAGE_SHIFT);
+ unlock_page(page);
+ put_page(page);
+}
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
@@ -512,13 +541,213 @@ int svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc)
static void svm_migrate_page_free(struct page *page)
{
+ /* Keep this function to avoid warning */
+}
+
+static int
+svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
+ struct migrate_vma *migrate, struct dma_fence **mfence,
+ dma_addr_t *scratch)
+{
+ uint64_t npages = migrate->cpages;
+ struct device *dev = adev->dev;
+ uint64_t *src;
+ dma_addr_t *dst;
+ struct page *dpage;
+ uint64_t i = 0, j;
+ uint64_t addr;
+ int r = 0;
+
+ pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
+ prange->last);
+
+ addr = prange->start << PAGE_SHIFT;
+
+ src = (uint64_t *)(scratch + npages);
+ dst = scratch;
+
+ for (i = 0, j = 0; i < npages; i++, j++, addr += PAGE_SIZE) {
+ struct page *spage;
+
+ spage = migrate_pfn_to_page(migrate->src[i]);
+ if (!spage) {
+ pr_debug("failed get spage svms 0x%p [0x%lx 0x%lx]\n",
+ prange->svms, prange->start, prange->last);
+ r = -ENOMEM;
+ goto out_oom;
+ }
+ src[i] = svm_migrate_addr(adev, spage);
+ if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
+ r = svm_migrate_copy_memory_gart(adev, dst + i - j,
+ src + i - j, j,
+ FROM_VRAM_TO_RAM,
+ mfence);
+ if (r)
+ goto out_oom;
+ j = 0;
+ }
+
+ dpage = svm_migrate_get_sys_page(migrate->vma, addr);
+ if (!dpage) {
+ pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
+ prange->svms, prange->start, prange->last);
+ r = -ENOMEM;
+ goto out_oom;
+ }
+
+ dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ r = dma_mapping_error(dev, dst[i]);
+ if (r) {
+ pr_debug("failed %d dma_map_page\n", r);
+ goto out_oom;
+ }
+
+ pr_debug("dma mapping dst to 0x%llx, page_to_pfn 0x%lx\n",
+ dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
+
+ migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
+ migrate->dst[i] |= MIGRATE_PFN_LOCKED;
+ }
+
+ r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
+ FROM_VRAM_TO_RAM, mfence);
+
+out_oom:
+ if (r) {
+ pr_debug("failed %d copy to ram\n", r);
+ while (i--) {
+ svm_migrate_put_sys_page(dst[i]);
+ migrate->dst[i] = 0;
+ }
+ }
+
+ return r;
+}
+
+static int
+svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
+ struct vm_area_struct *vma, uint64_t start, uint64_t end)
+{
+ uint64_t npages = (end - start) >> PAGE_SHIFT;
+ struct dma_fence *mfence = NULL;
+ struct migrate_vma migrate;
+ dma_addr_t *scratch;
+ size_t size;
+ void *buf;
+ int r = -ENOMEM;
+
+ memset(&migrate, 0, sizeof(migrate));
+ migrate.vma = vma;
+ migrate.start = start;
+ migrate.end = end;
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+ migrate.pgmap_owner = adev;
+
+ size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
+ size *= npages;
+ buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!buf)
+ goto out;
+
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+ scratch = (dma_addr_t *)(migrate.dst + npages);
+
+ r = migrate_vma_setup(&migrate);
+ if (r) {
+ pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
+ r, prange->svms, prange->start, prange->last);
+ goto out_free;
+ }
+
+ pr_debug("cpages %ld\n", migrate.cpages);
+
+ if (migrate.cpages) {
+ svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+ scratch);
+ migrate_vma_pages(&migrate);
+ svm_migrate_copy_done(adev, mfence);
+ migrate_vma_finalize(&migrate);
+ } else {
+ pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
+ prange->start, prange->last);
+ }
+
+ svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+
+out_free:
+ kvfree(buf);
+out:
+ return r;
+}
+
+/**
+ * svm_migrate_vram_to_ram - migrate svm range from device to system
+ * @prange: range structure
+ * @mm: process mm, use current->mm if NULL
+ *
+ * Context: Process context, caller hold mmap read lock, svms lock, prange lock
+ *
+ * Return:
+ * 0 - OK, otherwise error code
+ */
+int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
+{
+ struct amdgpu_device *adev;
+ struct vm_area_struct *vma;
+ unsigned long addr;
+ unsigned long start;
+ unsigned long end;
+ int r = 0;
+
+ if (!prange->actual_loc) {
+ pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
+ prange->start, prange->last);
+ return 0;
+ }
+
+ adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
+ if (!adev) {
+ pr_debug("failed to get device by id 0x%x\n",
+ prange->actual_loc);
+ return -ENODEV;
+ }
+
+ pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
+ prange->svms, prange, prange->start, prange->last,
+ prange->actual_loc);
+
+ start = prange->start << PAGE_SHIFT;
+ end = (prange->last + 1) << PAGE_SHIFT;
+
+ for (addr = start; addr < end;) {
+ unsigned long next;
+
+ vma = find_vma(mm, addr);
+ if (!vma || addr < vma->vm_start)
+ break;
+
+ next = min(vma->vm_end, end);
+ r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
+ if (r) {
+ pr_debug("failed %d to migrate\n", r);
+ break;
+ }
+ addr = next;
+ }
+
+ if (!r) {
+ svm_range_vram_node_free(prange);
+ prange->actual_loc = 0;
+ }
+ return r;
}
/**
* svm_migrate_to_ram - CPU page fault handler
* @vmf: CPU vm fault vma, address
*
- * Context: vm fault handler, mm->mmap_sem is taken
+ * Context: vm fault handler, caller holds the mmap read lock
*
* Return:
* 0 - OK
@@ -526,7 +755,74 @@ static void svm_migrate_page_free(struct page *page)
*/
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
- return VM_FAULT_SIGBUS;
+ unsigned long addr = vmf->address;
+ struct vm_area_struct *vma;
+ enum svm_work_list_ops op;
+ struct svm_range *parent;
+ struct svm_range *prange;
+ struct kfd_process *p;
+ struct mm_struct *mm;
+ int r = 0;
+
+ vma = vmf->vma;
+ mm = vma->vm_mm;
+
+ p = kfd_lookup_process_by_mm(vma->vm_mm);
+ if (!p) {
+ pr_debug("failed find process at fault address 0x%lx\n", addr);
+ return VM_FAULT_SIGBUS;
+ }
+ addr >>= PAGE_SHIFT;
+ pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
+
+ mutex_lock(&p->svms.lock);
+
+ prange = svm_range_from_addr(&p->svms, addr, &parent);
+ if (!prange) {
+ pr_debug("cannot find svm range at 0x%lx\n", addr);
+ r = -EFAULT;
+ goto out;
+ }
+
+ mutex_lock(&parent->migrate_mutex);
+ if (prange != parent)
+ mutex_lock_nested(&prange->migrate_mutex, 1);
+
+ if (!prange->actual_loc)
+ goto out_unlock_prange;
+
+ svm_range_lock(parent);
+ if (prange != parent)
+ mutex_lock_nested(&prange->lock, 1);
+ r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
+ if (prange != parent)
+ mutex_unlock(&prange->lock);
+ svm_range_unlock(parent);
+ if (r) {
+ pr_debug("failed %d to split range by granularity\n", r);
+ goto out_unlock_prange;
+ }
+
+ r = svm_migrate_vram_to_ram(prange, mm);
+ if (r)
+ pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
+ prange, prange->start, prange->last);
+
+ op = SVM_OP_UPDATE_RANGE_NOTIFIER;
+ svm_range_add_list_work(&p->svms, parent, mm, op);
+ schedule_deferred_list_work(&p->svms);
+
+out_unlock_prange:
+ if (prange != parent)
+ mutex_unlock(&prange->migrate_mutex);
+ mutex_unlock(&parent->migrate_mutex);
+out:
+ mutex_unlock(&p->svms.lock);
+ kfd_unref_process(p);
+
+ pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
+
+ return r ? VM_FAULT_SIGBUS : 0;
}
static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {