author		Linus Torvalds <torvalds@linux-foundation.org>	2019-09-21 20:07:42 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-21 20:07:42 +0300
commit		84da111de0b4be15bd500deff773f5116f39f7be (patch)
tree		76b5796f8258397bf7a3926b742a89166a8501ef /arch/openrisc
parent		227c3e9eb5cf3552c2cc83225df6d14adb05f8e8 (diff)
parent		62974fc389b364d8af70e044836362222bd3ae53 (diff)
download	linux-84da111de0b4be15bd500deff773f5116f39f7be.tar.xz
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull hmm updates from Jason Gunthorpe:
 "This is more cleanup and consolidation of the hmm APIs and the very
  strongly related mmu_notifier interfaces. Many places across the tree
  using these interfaces are touched in the process. Beyond that a
  cleanup to the page walker API and a few memremap related changes
  round out the series:

   - General improvement of hmm_range_fault() and related APIs, more
     documentation, bug fixes from testing, API simplification &
     consolidation, and unused API removal

   - Simplify the hmm related kconfigs to HMM_MIRROR and DEVICE_PRIVATE,
     and make them internal kconfig selects

   - Hoist a lot of code related to mmu notifier attachment out of
     drivers by using a refcount get/put attachment idiom and remove the
     convoluted mmu_notifier_unregister_no_release() and related APIs.

   - General API improvement for the migrate_vma API and revision of its
     only user in nouveau

   - Annotate mmu_notifiers with lockdep and sleeping region debugging

  Two series unrelated to HMM or mmu_notifiers came along due to
  dependencies:

   - Allow pagemap's memremap_pages family of APIs to work without
     providing a struct device

   - Make walk_page_range() and related use a constant structure for
     function pointers"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (75 commits)
  libnvdimm: Enable unit test infrastructure compile checks
  mm, notifier: Catch sleeping/blocking for !blockable
  kernel.h: Add non_block_start/end()
  drm/radeon: guard against calling an unpaired radeon_mn_unregister()
  csky: add missing brackets in a macro for tlb.h
  pagewalk: use lockdep_assert_held for locking validation
  pagewalk: separate function pointers from iterator data
  mm: split out a new pagewalk.h header from mm.h
  mm/mmu_notifiers: annotate with might_sleep()
  mm/mmu_notifiers: prime lockdep
  mm/mmu_notifiers: add a lockdep map for invalidate_range_start/end
  mm/mmu_notifiers: remove the __mmu_notifier_invalidate_range_start/end exports
  mm/hmm: hmm_range_fault() infinite loop
  mm/hmm: hmm_range_fault() NULL pointer bug
  mm/hmm: fix hmm_range_fault()'s handling of swapped out pages
  mm/mmu_notifiers: remove unregister_no_release
  RDMA/odp: remove ib_ucontext from ib_umem
  RDMA/odp: use mmu_notifier_get/put for 'struct ib_ucontext_per_mm'
  RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
  RDMA/mlx5: Use ib_umem_start instead of umem.address
  ...
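The last bullet above is the change that reaches arch/openrisc in the diff
below: walk_page_range() now takes the target mm_struct and a const
struct mm_walk_ops table of callbacks instead of an on-stack struct mm_walk
that bundled the function pointers with the mm. A minimal sketch of the new
calling convention, modelled on the openrisc conversion below; the example_*
names and the choice of init_mm here are illustrative, not part of the commit:

    #include <linux/pagewalk.h>

    /* Per-PTE callback; the callback signature is unchanged by the series. */
    static int example_pte_entry(pte_t *pte, unsigned long addr,
                                 unsigned long next, struct mm_walk *walk)
    {
            /* inspect or modify *pte for the range [addr, next) here */
            return 0;
    }

    /* Function pointers now live in a shared, const ops table... */
    static const struct mm_walk_ops example_walk_ops = {
            .pte_entry = example_pte_entry,
    };

    /* ...and the mm plus an optional private pointer are passed per call. */
    static int example_walk(unsigned long va, size_t size)
    {
            return walk_page_range(&init_mm, va, va + size,
                                   &example_walk_ops, NULL);
    }

Compared with the old API, the ops table can be static and shared, while the
walk state (the mm and any private data) is supplied at the call site, which
is exactly the shape of the arch/openrisc/kernel/dma.c change below.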
Diffstat (limited to 'arch/openrisc')
-rw-r--r--	arch/openrisc/kernel/dma.c	23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index b41a79fcdbd9..4d5b8bd1d795 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/dma-noncoherent.h>
+#include <linux/pagewalk.h>
 
 #include <asm/cpuinfo.h>
 #include <asm/spr_defs.h>
@@ -43,6 +44,10 @@ page_set_nocache(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
+static const struct mm_walk_ops set_nocache_walk_ops = {
+	.pte_entry = page_set_nocache,
+};
+
 static int
 page_clear_nocache(pte_t *pte, unsigned long addr,
 		   unsigned long next, struct mm_walk *walk)
@@ -58,6 +63,10 @@ page_clear_nocache(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
+static const struct mm_walk_ops clear_nocache_walk_ops = {
+	.pte_entry = page_clear_nocache,
+};
+
 /*
  * Alloc "coherent" memory, which for OpenRISC means simply uncached.
  *
@@ -80,10 +89,6 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 {
 	unsigned long va;
 	void *page;
-	struct mm_walk walk = {
-		.pte_entry = page_set_nocache,
-		.mm = &init_mm
-	};
 
 	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
 	if (!page)
@@ -98,7 +103,8 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * We need to iterate through the pages, clearing the dcache for
 	 * them and setting the cache-inhibit bit.
 	 */
-	if (walk_page_range(va, va + size, &walk)) {
+	if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
+			NULL)) {
 		free_pages_exact(page, size);
 		return NULL;
 	}
@@ -111,13 +117,10 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	unsigned long va = (unsigned long)vaddr;
-	struct mm_walk walk = {
-		.pte_entry = page_clear_nocache,
-		.mm = &init_mm
-	};
 
 	/* walk_page_range shouldn't be able to fail here */
-	WARN_ON(walk_page_range(va, va + size, &walk));
+	WARN_ON(walk_page_range(&init_mm, va, va + size,
+			&clear_nocache_walk_ops, NULL));
 
 	free_pages_exact(vaddr, size);
 }