path: root/arch/arc/mm/highmem.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 05:35:53 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 05:35:53 +0300
commit    edd7ab76847442e299af64a761febd180d71f98d (patch)
tree      91c3ee6ca27074c655bc1aaf04358f4e41df54d4 /arch/arc/mm/highmem.c
parent    adb35e8dc98ba9bda99ff79ac6a05b8fcde2a762 (diff)
parent    68061c02bb295da4955f0d309b9459f0a7ba83dd (diff)
download  linux-edd7ab76847442e299af64a761febd180d71f98d.tar.xz
Merge tag 'core-mm-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull kmap updates from Thomas Gleixner:
 "The new preemptible kmap_local() implementation:

   - Consolidate all kmap_atomic() internals into a generic
     implementation which builds the base for the kmap_local() API, and
     make the kmap_atomic() interface wrappers which handle the
     disabling/enabling of preemption and pagefaults.

   - Switch the storage from per-CPU to per task and provide scheduler
     support for clearing mappings when scheduling out and restoring
     them when scheduling back in.

   - Merge the migrate_disable/enable() code, which is also part of the
     scheduler pull request. This was required to make the kmap_local()
     interface available which does not disable preemption when a
     mapping is established. It has to disable migration instead to
     guarantee that the virtual address of the mapped slot is the same
     across preemption.

   - Provide better debug facilities: guard pages and enforced
     utilization of the mapping mechanics on 64bit systems when the
     architecture allows it.

   - Provide the new kmap_local() API which can now be used to clean up
     the kmap_atomic() usage sites all over the place. Most of the
     usage sites do not require the implicit disabling of preemption
     and pagefaults, so the penalty on 64bit and 32bit non-highmem
     systems is removed and quite some of the code can be simplified. A
     wholesale conversion is not possible because some usage depends on
     the implicit side effects and some need to be cleaned up because
     they work around these side effects.

     The migrate_disable() side effect is only effective on highmem
     systems and when enforced debugging is enabled. On 64bit and 32bit
     non-highmem systems the overhead is completely avoided"

* tag 'core-mm-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  ARM: highmem: Fix cache_is_vivt() reference
  x86/crashdump/32: Simplify copy_oldmem_page()
  io-mapping: Provide iomap_local variant
  mm/highmem: Provide kmap_local*
  sched: highmem: Store local kmaps in task struct
  x86: Support kmap_local() forced debugging
  mm/highmem: Provide CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
  mm/highmem: Provide and use CONFIG_DEBUG_KMAP_LOCAL
  microblaze/mm/highmem: Add dropped #ifdef back
  xtensa/mm/highmem: Make generic kmap_atomic() work correctly
  mm/highmem: Take kmap_high_get() properly into account
  highmem: High implementation details and document API
  Documentation/io-mapping: Remove outdated blurb
  io-mapping: Cleanup atomic iomap
  mm/highmem: Remove the old kmap_atomic cruft
  highmem: Get rid of kmap_types.h
  xtensa/mm/highmem: Switch to generic kmap atomic
  sparc/mm/highmem: Switch to generic kmap atomic
  powerpc/mm/highmem: Switch to generic kmap atomic
  nds32/mm/highmem: Switch to generic kmap atomic
  ...
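As a usage illustration, a minimal sketch (not taken from this merge; the helper name is made up) of the kmap_local_page()/kunmap_local() pattern the series enables. Unlike kmap_atomic(), it leaves preemption and pagefaults enabled:

  #include <linux/highmem.h>
  #include <linux/string.h>

  /*
   * Copy data out of a possibly-highmem page. On highmem systems
   * kmap_local_page() only disables migration, so the returned virtual
   * address stays valid even if the task is preempted.
   */
  static void copy_from_page_sketch(struct page *page, void *dst, size_t len)
  {
          void *vaddr = kmap_local_page(page);

          memcpy(dst, vaddr, len);
          kunmap_local(vaddr);
  }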
Diffstat (limited to 'arch/arc/mm/highmem.c')
-rw-r--r--  arch/arc/mm/highmem.c | 54
1 file changed, 5 insertions(+), 49 deletions(-)
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 1b9f473c6369..c79912a6b196 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -36,9 +36,8 @@
  * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
  * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
  *
- * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
- *   slots across NR_CPUS would be more than sufficient (generic code defines
- *   KM_TYPE_NR as 20).
+ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
+ *   CPU. So the number of CPUs sharing a single PTE page is limited.
  *
  * - pkmap being preemptible, in theory could do with more than 256 concurrent
  *   mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
@@ -47,48 +46,6 @@
  */
 
 extern pte_t * pkmap_page_table;
-static pte_t * fixmap_page_table;
-
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
-        int idx, cpu_idx;
-        unsigned long vaddr;
-
-        cpu_idx = kmap_atomic_idx_push();
-        idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
-        vaddr = FIXMAP_ADDR(idx);
-
-        set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
-                   mk_pte(page, prot));
-
-        return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kv)
-{
-        unsigned long kvaddr = (unsigned long)kv;
-
-        if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
-
-                /*
-                 * Because preemption is disabled, this vaddr can be associated
-                 * with the current allocated index.
-                 * But in case of multiple live kmap_atomic(), it still relies on
-                 * callers to unmap in right order.
-                 */
-                int cpu_idx = kmap_atomic_idx();
-                int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
-
-                WARN_ON(kvaddr != FIXMAP_ADDR(idx));
-
-                pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
-                local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
-
-                kmap_atomic_idx_pop();
-        }
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
 
 static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 {
@@ -108,10 +65,9 @@ void __init kmap_init(void)
 {
         /* Due to recursive include hell, we can't do this in processor.h */
         BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+        BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+        BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
 
-        BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
         pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-
-        BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
-        fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
+        alloc_kmap_pgtable(FIXMAP_BASE);
 }
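For context on the two new BUILD_BUG_ON() checks: a rough sketch of the slot arithmetic they guard, assuming (as in the generic kmap_local implementation) that each CPU owns KM_MAX_IDX consecutive fixmap slots and FIX_KMAP_SLOTS is the total across all CPUs. This helper is an illustration, not the exact kernel code:

  /*
   * Hypothetical helper mirroring the generic per-CPU index math: all
   * slots of all CPUs must fit in the single PTE page backing the
   * fixmap region, hence FIX_KMAP_SLOTS <= PTRS_PER_PTE above.
   */
  static inline int fixmap_slot(int kmap_idx, int cpu)
  {
          return kmap_idx + KM_MAX_IDX * cpu;
  }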