author	Thomas Gleixner <tglx@linutronix.de>	2020-11-03 12:27:18 +0300
committer	Thomas Gleixner <tglx@linutronix.de>	2020-11-07 01:14:54 +0300
commit	298fa1ad5571f59cb3ca5497a9455f36867f065e (patch)
tree	29c072412a6a6f6906d5a9c9c784c33e75b2e909 /mm/highmem.c
parent	4f8b96cd47b06f1e3ec71c1a3216113efe8dbfb5 (diff)
download	linux-298fa1ad5571f59cb3ca5497a9455f36867f065e.tar.xz
highmem: Provide generic variant of kmap_atomic*
The kmap_atomic* interfaces in all architectures are pretty much the same
except for post map operations (flush) and pre- and post unmap operations.

Provide a generic variant for that.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linuxfoundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/r/20201103095857.175939340@linutronix.de
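As context for the diff below: an architecture opts into the generic code by selecting CONFIG_KMAP_LOCAL and, where it needs cache or TLB maintenance, defining the arch_kmap_local_* hooks before mm/highmem.c falls back to its empty defaults. A minimal, hypothetical sketch of such glue (the file path and the flush helper are placeholders for whatever the architecture actually requires; they are not part of this patch):

/* arch/foo/include/asm/highmem.h - hypothetical opt-in glue.
 * Any hook left undefined falls back to the no-op
 * do { } while (0) stubs in mm/highmem.c below.
 */
#define arch_kmap_local_post_map(vaddr, pteval)	local_flush_tlb_one(vaddr)
#define arch_kmap_local_post_unmap(vaddr)	local_flush_tlb_one(vaddr)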
Diffstat (limited to 'mm/highmem.c')
-rw-r--r--	mm/highmem.c	144
1 file changed, 142 insertions(+), 2 deletions(-)
diff --git a/mm/highmem.c b/mm/highmem.c
index 6abfd762eee7..bb4ce13ee7e7 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -31,9 +31,11 @@
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
+#ifndef CONFIG_KMAP_LOCAL
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 DEFINE_PER_CPU(int, __kmap_atomic_idx);
 #endif
+#endif
 
 /*
  * Virtual_count is not a pure "count".
@@ -365,9 +367,147 @@ void kunmap_high(struct page *page)
 	if (need_wakeup)
 		wake_up(pkmap_map_wait);
 }
-
 EXPORT_SYMBOL(kunmap_high);
-#endif /* CONFIG_HIGHMEM */
+#endif /* CONFIG_HIGHMEM */
+
+#ifdef CONFIG_KMAP_LOCAL
+
+#include <asm/kmap_size.h>
+
+static DEFINE_PER_CPU(int, __kmap_local_idx);
+
+static inline int kmap_local_idx_push(void)
+{
+	int idx = __this_cpu_inc_return(__kmap_local_idx) - 1;
+
+	WARN_ON_ONCE(in_irq() && !irqs_disabled());
+	BUG_ON(idx >= KM_MAX_IDX);
+	return idx;
+}
+
+static inline int kmap_local_idx(void)
+{
+	return __this_cpu_read(__kmap_local_idx) - 1;
+}
+
+static inline void kmap_local_idx_pop(void)
+{
+	int idx = __this_cpu_dec_return(__kmap_local_idx);
+
+	BUG_ON(idx < 0);
+}
+
+#ifndef arch_kmap_local_post_map
+# define arch_kmap_local_post_map(vaddr, pteval) do { } while (0)
+#endif
+#ifndef arch_kmap_local_pre_unmap
+# define arch_kmap_local_pre_unmap(vaddr) do { } while (0)
+#endif
+
+#ifndef arch_kmap_local_post_unmap
+# define arch_kmap_local_post_unmap(vaddr) do { } while (0)
+#endif
+
+#ifndef arch_kmap_local_map_idx
+#define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx)
+#endif
+
+#ifndef arch_kmap_local_unmap_idx
+#define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx)
+#endif
+
+#ifndef arch_kmap_local_high_get
+static inline void *arch_kmap_local_high_get(struct page *page)
+{
+	return NULL;
+}
+#endif
+
+/* Unmap a local mapping which was obtained by kmap_high_get() */
+static inline void kmap_high_unmap_local(unsigned long vaddr)
+{
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
+		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+#endif
+}
+
+static inline int kmap_local_calc_idx(int idx)
+{
+	return idx + KM_MAX_IDX * smp_processor_id();
+}
+
+static pte_t *__kmap_pte;
+
+static pte_t *kmap_get_pte(void)
+{
+	if (!__kmap_pte)
+		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+	return __kmap_pte;
+}
+
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	pte_t pteval, *kmap_pte = kmap_get_pte();
+	unsigned long vaddr;
+	int idx;
+
+	preempt_disable();
+	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	BUG_ON(!pte_none(*(kmap_pte - idx)));
+	pteval = pfn_pte(pfn, prot);
+	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+	arch_kmap_local_post_map(vaddr, pteval);
+	preempt_enable();
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
+
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	void *kmap;
+
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	/* Try kmap_high_get() if architecture has it enabled */
+	kmap = arch_kmap_local_high_get(page);
+	if (kmap)
+		return kmap;
+
+	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
+}
+EXPORT_SYMBOL(__kmap_local_page_prot);
+
+void kunmap_local_indexed(void *vaddr)
+{
+	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
+	pte_t *kmap_pte = kmap_get_pte();
+	int idx;
+
+	if (addr < __fix_to_virt(FIX_KMAP_END) ||
+	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
+		WARN_ON_ONCE(addr < PAGE_OFFSET);
+
+		/* Handle mappings which were obtained by kmap_high_get() */
+		kmap_high_unmap_local(addr);
+		return;
+	}
+
+	preempt_disable();
+	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
+	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+	arch_kmap_local_pre_unmap(addr);
+	pte_clear(&init_mm, addr, kmap_pte - idx);
+	arch_kmap_local_post_unmap(addr);
+	kmap_local_idx_pop();
+	preempt_enable();
+}
+EXPORT_SYMBOL(kunmap_local_indexed);
+#endif
 
 #if defined(HASHED_PAGE_VIRTUAL)
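For illustration, a minimal caller sketch of the new entry points (the helper function and the PAGE_KERNEL protection are assumptions for the example, not part of the patch). Note the design of the generic code above: mappings live in per-CPU, stack-like fixmap slots (KM_MAX_IDX deep, see kmap_local_idx_push/pop), so nested mappings must be released in LIFO order.

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy one highmem page out through a short-lived local mapping. */
static void copy_page_out(struct page *page, void *dst)
{
	void *src = __kmap_local_page_prot(page, PAGE_KERNEL);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_local_indexed(src);
}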