Diffstat (limited to 'arch/powerpc/include/asm/nohash/pgtable.h')
-rw-r--r--  arch/powerpc/include/asm/nohash/pgtable.h  167
1 file changed, 88 insertions, 79 deletions
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index c56de1e8026f..70ff23974b59 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -8,19 +8,50 @@
#include <asm/nohash/32/pgtable.h>
#endif
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+ _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
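
Note: the block of defines above composes each kernel pgprot from a base (cacheable or not) plus a permission group, and then selects PAGE_KERNEL_TEXT at build time: RWX whenever a debugger or code-patching facility (KGDB, XMON, BDI_SWITCH, KPROBES, DYNAMIC_FTRACE) must write to kernel text, ROX otherwise. A minimal userspace sketch of that composition follows; all FAKE_* values are made-up stand-ins, not the kernel's real bit definitions.

#include <stdio.h>

/* Illustrative stand-ins only; the real values come from the pte-*.h headers. */
#define FAKE_PAGE_BASE        0x001UL	/* present/accessed style base bits  */
#define FAKE_PAGE_KERNEL_RW   0x006UL	/* supervisor read + write           */
#define FAKE_PAGE_KERNEL_RWX  0x00eUL	/* supervisor read + write + execute */
#define FAKE_PAGE_KERNEL_ROX  0x018UL	/* supervisor read + execute only    */
#define FAKE_PAGE_NO_CACHE    0x020UL

#define FAKE_KERNEL      (FAKE_PAGE_BASE | FAKE_PAGE_KERNEL_RW)
#define FAKE_KERNEL_NC   (FAKE_PAGE_BASE | FAKE_PAGE_KERNEL_RW | FAKE_PAGE_NO_CACHE)

/* Pretend CONFIG_KPROBES (or another text-patching facility) is enabled. */
#define FAKE_TEXT_NEEDS_WRITE 1

#if FAKE_TEXT_NEEDS_WRITE
#define FAKE_KERNEL_TEXT (FAKE_PAGE_BASE | FAKE_PAGE_KERNEL_RWX)	/* breakpoints allowed  */
#else
#define FAKE_KERNEL_TEXT (FAKE_PAGE_BASE | FAKE_PAGE_KERNEL_ROX)	/* text stays read-only */
#endif

int main(void)
{
	printf("PAGE_KERNEL      -> 0x%03lx\n", FAKE_KERNEL);
	printf("PAGE_KERNEL_NC   -> 0x%03lx\n", FAKE_KERNEL_NC);
	printf("PAGE_KERNEL_TEXT -> 0x%03lx\n", FAKE_KERNEL_TEXT);
	return 0;
}
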
#ifndef __ASSEMBLY__
/* Generic accessors to PTE bits */
+#ifndef pte_write
static inline int pte_write(pte_t pte)
{
- return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
+ return pte_val(pte) & _PAGE_RW;
}
+#endif
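
Note: the new #ifndef guard turns the generic pte_write() into a weak default: a platform header included earlier (for instance one whose write protection is expressed through a read-only bit, as the removed _PAGE_RO logic suggests) can define its own pte_write, and the generic `pte_val(pte) & _PAGE_RW` version is then compiled out. A self-contained mock of that override pattern, with invented types and bit values:

#include <stdio.h>

typedef unsigned long pte_t;	/* stand-in; the real pte_t is arch specific */
#define _PAGE_RW 0x002UL
#define _PAGE_RO 0x800UL	/* imaginary "write protected when set" bit */

/* A platform header could supply its own accessor first ... */
#define pte_write pte_write
static inline int pte_write(pte_t pte) { return !(pte & _PAGE_RO); }

/* ... in which case the generic fallback below is skipped entirely. */
#ifndef pte_write
static inline int pte_write(pte_t pte) { return (pte & _PAGE_RW) != 0; }
#endif

int main(void)
{
	printf("%d %d\n", pte_write(0x000UL), pte_write(_PAGE_RO));	/* prints: 1 0 */
	return 0;
}
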
static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_hashpte(pte_t pte) { return false; }
+static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
+static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
#ifdef CONFIG_NUMA_BALANCING
/*
@@ -30,8 +61,7 @@ static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PA
*/
static inline int pte_protnone(pte_t pte)
{
- return (pte_val(pte) &
- (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+ return pte_present(pte) && !pte_user(pte);
}
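
Note: for NUMA balancing, a PROT_NONE PTE is one that is still present but no longer user accessible, and the rewrite above says exactly that through pte_present() and pte_user() instead of open-coding the mask. A stand-alone sketch (single-bit stand-in values) showing the helper-based test matches the old mask test:

#include <assert.h>

#define _PAGE_PRESENT 0x001UL	/* illustrative stand-ins */
#define _PAGE_USER    0x004UL

static int old_protnone(unsigned long v)
{
	return (v & (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
}

static int new_protnone(unsigned long v)
{
	int present = (v & _PAGE_PRESENT) != 0;
	int user = (v & _PAGE_USER) == _PAGE_USER;

	return present && !user;
}

int main(void)
{
	for (unsigned long v = 0; v < 8; v++)
		assert(old_protnone(v) == new_protnone(v));
	return 0;
}
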
static inline int pmd_protnone(pmd_t pmd)
@@ -45,6 +75,23 @@ static inline int pte_present(pte_t pte)
return pte_val(pte) & _PAGE_PRESENT;
}
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/*
+ * Don't just check for any non zero bits in __PAGE_USER, since for book3e
+ * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
+ * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
+ */
+#ifndef pte_user
+static inline bool pte_user(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+}
+#endif
+
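
Note: the comment above is the key subtlety. On book3e with 64-bit PTEs, _PAGE_USER is a multi-bit mask and kernel mappings already carry one of those bits (_PAGE_BAP_SR), so an "any bit set" check would misidentify kernel pages as user pages; pte_user() therefore insists on every bit of _PAGE_USER. A small stand-alone illustration with made-up BAP-style bits:

#include <stdio.h>

#define FAKE_BAP_SR 0x1UL		/* supervisor-read style bit (stand-in)  */
#define FAKE_BAP_UR 0x2UL		/* user-read style bit (stand-in)        */
#define FAKE_PAGE_USER (FAKE_BAP_SR | FAKE_BAP_UR)

int main(void)
{
	unsigned long kernel_pte = FAKE_BAP_SR;	/* kernel mapping: SR only */

	/* Naive "any bit" test wrongly reports a user mapping. */
	printf("any-bit test : %d\n", (kernel_pte & FAKE_PAGE_USER) != 0);		/* 1 */
	/* Exact-match test, as pte_user() does, gets it right. */
	printf("all-bits test: %d\n",
	       (kernel_pte & FAKE_PAGE_USER) == FAKE_PAGE_USER);			/* 0 */
	return 0;
}
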
/*
* We only find page table entry in the last level
* Hence no need for other accessors
@@ -52,17 +99,14 @@ static inline int pte_present(pte_t pte)
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
- unsigned long pteval = pte_val(pte);
/*
* A read-only access is controlled by _PAGE_USER bit.
* We have _PAGE_READ set for WRITE and EXECUTE
*/
- unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;
-
- if (write)
- need_pte_bits |= _PAGE_WRITE;
+ if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ return false;
- if ((pteval & need_pte_bits) != need_pte_bits)
+ if (write && !pte_write(pte))
return false;
return true;
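
Note: the rewrite of pte_access_permitted() replaces the hand-assembled required-bit mask with early returns on the named helpers; the behaviour is unchanged (pte_read() is always true on nohash, so the extra check is free). A stand-alone sketch, with three illustrative bits standing in for the helpers, checking the two styles agree over all combinations:

#include <assert.h>
#include <stdbool.h>

#define P_PRESENT 0x1UL		/* illustrative stand-ins */
#define P_USER    0x2UL
#define P_WRITE   0x4UL

static bool old_style(unsigned long v, bool write)
{
	unsigned long need = P_PRESENT | P_USER;

	if (write)
		need |= P_WRITE;
	return (v & need) == need;
}

static bool new_style(unsigned long v, bool write)
{
	if (!(v & P_PRESENT) || !(v & P_USER))
		return false;
	if (write && !(v & P_WRITE))
		return false;
	return true;
}

int main(void)
{
	for (unsigned long v = 0; v < 8; v++) {
		assert(old_style(v, false) == new_style(v, false));
		assert(old_style(v, true) == new_style(v, true));
	}
	return 0;
}
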
@@ -81,53 +125,53 @@ static inline unsigned long pte_pfn(pte_t pte) {
return pte_val(pte) >> PTE_RPN_SHIFT; }
/* Generic modifiers for PTE bits */
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t pte_exprotect(pte_t pte)
{
- pte_basic_t ptev;
-
- ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
- ptev |= _PAGE_RO;
- return __pte(ptev);
+ return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
+#ifndef pte_mkclean
static inline pte_t pte_mkclean(pte_t pte)
{
- return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
+ return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}
+#endif
static inline pte_t pte_mkold(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkpte(pte_t pte)
{
- pte_basic_t ptev;
-
- ptev = pte_val(pte) & ~_PAGE_RO;
- ptev |= _PAGE_RW;
- return __pte(ptev);
+ return pte;
}
-static inline pte_t pte_mkdirty(pte_t pte)
+static inline pte_t pte_mkspecial(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_DIRTY);
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
-static inline pte_t pte_mkyoung(pte_t pte)
+#ifndef pte_mkhuge
+static inline pte_t pte_mkhuge(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
+ return __pte(pte_val(pte));
}
+#endif
-static inline pte_t pte_mkspecial(pte_t pte)
+#ifndef pte_mkprivileged
+static inline pte_t pte_mkprivileged(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPECIAL);
+ return __pte(pte_val(pte) & ~_PAGE_USER);
}
+#endif
-static inline pte_t pte_mkhuge(pte_t pte)
+#ifndef pte_mkuser
+static inline pte_t pte_mkuser(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_HUGE);
+ return __pte(pte_val(pte) | _PAGE_USER);
}
+#endif
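
Note: all of the pte_mk*()/pte_*protect() modifiers above are pure functions: they take a PTE by value and return the adjusted value, so callers can chain them, and the #ifndef guards let a platform override individual ones (pte_mkhuge() defaults to a no-op where there is no dedicated huge-page bit). A self-contained sketch of that functional style with invented types and bits:

#include <stdio.h>

typedef struct { unsigned long val; } pte_t;	/* stand-in definitions */
#define __pte(x)    ((pte_t){ (x) })
#define pte_val(x)  ((x).val)

#define _PAGE_DIRTY 0x080UL
#define _PAGE_USER  0x004UL

static pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~_PAGE_DIRTY); }
static pte_t pte_mkuser(pte_t pte)  { return __pte(pte_val(pte) | _PAGE_USER); }
/* Default pte_mkhuge() where no huge-page bit exists: a no-op. */
static pte_t pte_mkhuge(pte_t pte)  { return __pte(pte_val(pte)); }

int main(void)
{
	pte_t pte = __pte(_PAGE_DIRTY);

	pte = pte_mkuser(pte_mkclean(pte_mkhuge(pte)));
	printf("0x%lx\n", pte_val(pte));	/* prints 0x4: dirty cleared, user set */
	return 0;
}
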
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -148,70 +192,33 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, int percpu)
{
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
- /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
- * helper pte_update() which does an atomic update. We need to do that
- * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
- * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
- * the hash bits instead (ie, same as the non-SMP case)
- */
- if (percpu)
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
- else
- pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
/* Second case is 32-bit with 64-bit PTE. In this case, we
* can just store as long as we do the two halves in the right order
- * with a barrier in between. This is possible because we take care,
- * in the hash code, to pre-invalidate if the PTE was already hashed,
- * which synchronizes us with any concurrent invalidation.
- * In the percpu case, we also fallback to the simple update preserving
- * the hash bits
+ * with a barrier in between.
+ * In the percpu case, we also fallback to the simple update
*/
- if (percpu) {
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
+ if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
+ __asm__ __volatile__("\
+ stw%U0%X0 %2,%0\n\
+ eieio\n\
+ stw%U0%X0 %L2,%1"
+ : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+ : "r" (pte) : "memory");
return;
}
-#if _PAGE_HASHPTE != 0
- if (pte_val(*ptep) & _PAGE_HASHPTE)
- flush_hash_entry(mm, ptep, addr);
-#endif
- __asm__ __volatile__("\
- stw%U0%X0 %2,%0\n\
- eieio\n\
- stw%U0%X0 %L2,%1"
- : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
- : "r" (pte) : "memory");
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
- /* Third case is 32-bit hash table in UP mode, we need to preserve
- * the _PAGE_HASHPTE bit since we may not have invalidated the previous
- * translation in the hash yet (done in a subsequent flush_tlb_xxx())
- * and see we need to keep track that this PTE needs invalidating
- */
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
/* Anything else just stores the PTE normally. That covers all 64-bit
* cases, and 32-bit non-hash with 32-bit PTEs.
*/
*ptep = pte;
-#ifdef CONFIG_PPC_BOOK3E_64
/*
* With hardware tablewalk, a sync is needed to ensure that
* subsequent accesses see the PTE we just wrote. Unlike userspace
* mappings, we can't tolerate spurious faults, so make sure
* the new PTE will be seen the first time.
*/
- if (is_kernel_addr(addr))
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
mb();
-#endif
-#endif
}
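
Note: the __set_pte_at() hunk drops the hash-MMU cases, which never apply to nohash builds, and converts the remaining conditionals from #ifdef blocks to IS_ENABLED() so every branch is always parsed and type-checked while the disabled ones are discarded as dead code. The sketch below mimics that structure in plain C; MY_IS_ENABLED and the config macro are simplified stand-ins for the kernel's kconfig.h machinery, and the memcpy pair merely stands in for the two stw stores ordered by eieio in the inline asm above.

#include <stdio.h>
#include <string.h>

#define MY_CONFIG_PPC32_PTE64 1		/* pretend this option is =y */
#define MY_IS_ENABLED(opt)    (opt)	/* the real IS_ENABLED() uses token pasting */

static void set_entry(unsigned long long *ptep, unsigned long long pte, int percpu)
{
	if (MY_IS_ENABLED(MY_CONFIG_PPC32_PTE64) && !percpu) {
		/* Write the 64-bit value as two 32-bit halves; the real code
		 * orders the two stores with an eieio barrier in between. */
		memcpy(ptep, &pte, sizeof(unsigned int));
		memcpy((char *)ptep + sizeof(unsigned int),
		       (char *)&pte + sizeof(unsigned int), sizeof(unsigned int));
		return;
	}
	*ptep = pte;	/* every other configuration: a plain store */
}

int main(void)
{
	unsigned long long slot = 0;

	set_entry(&slot, 0x123456789abcdef0ULL, 0);
	printf("0x%llx\n", slot);	/* 0x123456789abcdef0 */
	return 0;
}
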
@@ -238,6 +245,8 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
_PAGE_COHERENT | _PAGE_WRITETHRU))
+#else
+#define pgprot_cached_wthru(prot) pgprot_noncached(prot)
#endif
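
Note: the new #else arm means a platform without a _PAGE_WRITETHRU bit still gets a working pgprot_cached_wthru(): the request simply degrades to a non-cached mapping rather than leaving the macro undefined. A stand-alone sketch of that compile-time fallback with invented bit values:

#include <stdio.h>

#define FAKE_PAGE_NO_CACHE   0x1UL
#define FAKE_PAGE_WRITETHRU  0x0UL	/* pretend the platform has no such bit */

#define fake_noncached(prot)	((prot) | FAKE_PAGE_NO_CACHE)

#if FAKE_PAGE_WRITETHRU != 0
#define fake_cached_wthru(prot)	((prot) | FAKE_PAGE_WRITETHRU)
#else
#define fake_cached_wthru(prot)	fake_noncached(prot)
#endif

int main(void)
{
	printf("0x%lx\n", fake_cached_wthru(0x0UL));	/* 0x1: fell back to no-cache */
	return 0;
}
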
#define pgprot_cached_noncoherent(prot) \