From 953cab73b8bc487da330aa454abd7f8c7466737e Mon Sep 17 00:00:00 2001
From: Madhurkiran Harikrishnan
Date: Mon, 24 Feb 2020 18:32:16 -0800
Subject: [LINUX][rel-v2020.1][PATCH v1 2/3] Support for vm_insert_pfn
 deprecated from kernel 4.20

From kernel 4.20 onwards, vm_insert_pfn() is no longer available;
replace it with vmf_insert_pfn(). Since vmf_insert_pfn() returns a
vm_fault_t bit-mask instead of a negative errno, the error checks are
updated to test for VM_FAULT_ERROR.

Signed-off-by: Madhurkiran Harikrishnan
---
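Notes:
    The error checks change together with the API because the return
    conventions differ: vm_insert_pfn() returned 0 or a negative errno,
    while vmf_insert_pfn() returns a vm_fault_t bit-mask that has to be
    tested against VM_FAULT_ERROR. As a rough sketch only (the helper
    name below is hypothetical and is not introduced by this patch),
    the per-call-site #if/#else blocks could alternatively be folded
    into a single compat wrapper:

    #include <linux/version.h>
    #include <linux/mm.h>

    /* Hypothetical helper, for illustration only; this patch instead
     * open-codes the #if/#else at each call site. Returns 0 on success
     * and -EFAULT on any failure for both kernel ranges.
     */
    static inline int mali_compat_insert_pfn(struct vm_area_struct *vma,
                                             unsigned long addr,
                                             unsigned long pfn)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
            /* Failure is reported via VM_FAULT_* bits, not an errno. */
            vm_fault_t ret = vmf_insert_pfn(vma, addr, pfn);

            return (ret & VM_FAULT_ERROR) ? -EFAULT : 0;
    #else
            /* vm_insert_pfn() returns 0 on success or a negative errno. */
            return vm_insert_pfn(vma, addr, pfn) ? -EFAULT : 0;
    #endif
    }

    Note that the resize path in mali_memory_os_alloc.c also inspects
    ret for -EBUSY, so a wrapper that folds every failure into -EFAULT
    would not be a drop-in replacement there.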
 .../devicedrv/mali/linux/mali_memory_block_alloc.c   |  6 +++++-
 driver/src/devicedrv/mali/linux/mali_memory_cow.c    | 14 ++++++++++++--
 .../src/devicedrv/mali/linux/mali_memory_os_alloc.c  | 20 +++++++++++++++++---
 driver/src/devicedrv/mali/linux/mali_memory_secure.c |  7 ++++++-
 4 files changed, 40 insertions(+), 7 deletions(-)

diff --git a/driver/src/devicedrv/mali/linux/mali_memory_block_alloc.c b/driver/src/devicedrv/mali/linux/mali_memory_block_alloc.c
index 0c5b6c3..e528699 100644
--- a/linux/mali_memory_block_alloc.c
+++ b/linux/mali_memory_block_alloc.c
@@ -309,9 +309,13 @@ int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *v
 
 	list_for_each_entry(m_page, &block_mem->pfns, list) {
 		MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
-
 		if (unlikely(0 != ret)) {
+#endif
 			return -EFAULT;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_cow.c b/driver/src/devicedrv/mali/linux/mali_memory_cow.c
index f1d44fe..1dae1d6 100644
--- a/linux/mali_memory_cow.c
+++ b/linux/mali_memory_cow.c
@@ -532,9 +532,14 @@ int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma
 		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
 		ret = vm_insert_page(vma, addr, page);
 		*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
-
 		if (unlikely(0 != ret)) {
+#endif
+
 			return ret;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
@@ -569,9 +574,14 @@ _mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bken
 
 	list_for_each_entry(m_page, &cow->pages, list) {
 		if ((count >= offset) && (count < offset + num)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+			ret = vmf_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
+			if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 			ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
-
 			if (unlikely(0 != ret)) {
+#endif
+
 				if (count == offset) {
 					return _MALI_OSK_ERR_FAULT;
 				} else {
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_os_alloc.c b/driver/src/devicedrv/mali/linux/mali_memory_os_alloc.c
index 3fb6f05..7de3920 100644
--- a/linux/mali_memory_os_alloc.c
+++ b/linux/mali_memory_os_alloc.c
@@ -378,9 +378,14 @@ int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
 		ret = vm_insert_page(vma, addr, page);
 		*/
 		page = m_page->page;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, page_to_pfn(page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
-
 		if (unlikely(0 != ret)) {
+#endif
+
 			return -EFAULT;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
@@ -416,9 +421,13 @@ _mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bken
 
 		vm_end -= _MALI_OSK_MALI_PAGE_SIZE;
 		if (mapping_page_num > 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+			ret = vmf_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
+			if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 			ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
-
 			if (unlikely(0 != ret)) {
+#endif
 				/*will return -EBUSY If the page has already been mapped into table, but it's OK*/
 				if (-EBUSY == ret) {
 					break;
@@ -439,9 +448,14 @@ _mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bken
 
 	list_for_each_entry(m_page, &os_mem->pages, list) {
 		if (count >= offset) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+			ret = vmf_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
+			if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 			ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
-
 			if (unlikely(0 != ret)) {
+#endif
+
 				/*will return -EBUSY If the page has already been mapped into table, but it's OK*/
 				if (-EBUSY == ret) {
 					break;
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_secure.c b/driver/src/devicedrv/mali/linux/mali_memory_secure.c
index 5546304..cebd1c8 100644
--- a/linux/mali_memory_secure.c
+++ b/linux/mali_memory_secure.c
@@ -132,9 +132,14 @@ int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *
 		MALI_DEBUG_ASSERT(0 == size % _MALI_OSK_MALI_PAGE_SIZE);
 
 		for (j = 0; j < size / _MALI_OSK_MALI_PAGE_SIZE; j++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+			ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys));
+			if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 			ret = vm_insert_pfn(vma, addr, PFN_DOWN(phys));
-
 			if (unlikely(0 != ret)) {
+#endif
+
 				return -EFAULT;
 			}
 			addr += _MALI_OSK_MALI_PAGE_SIZE;
-- 
2.7.4