Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
 -rw-r--r--  arch/s390/include/asm/pgtable.h | 648
 1 file changed, 352 insertions(+), 296 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 75fb726de91f..9b60a36c348d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -217,63 +217,57 @@ extern unsigned long MODULES_END;
/* Hardware bits in the page table entry */
#define _PAGE_CO 0x100 /* HW Change-bit override */
-#define _PAGE_RO 0x200 /* HW read-only bit */
+#define _PAGE_PROTECT 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
+#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
/* Software bits in the page table entry */
-#define _PAGE_SWT 0x001 /* SW pte type bit t */
-#define _PAGE_SWX 0x002 /* SW pte type bit x */
-#define _PAGE_SWC 0x004 /* SW pte changed bit */
-#define _PAGE_SWR 0x008 /* SW pte referenced bit */
-#define _PAGE_SWW 0x010 /* SW pte write bit */
-#define _PAGE_SPECIAL 0x020 /* SW associated with special page */
+#define _PAGE_PRESENT 0x001 /* SW pte present bit */
+#define _PAGE_TYPE 0x002 /* SW pte type bit */
+#define _PAGE_YOUNG 0x004 /* SW pte young bit */
+#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
+#define _PAGE_READ 0x010 /* SW pte read bit */
+#define _PAGE_WRITE 0x020 /* SW pte write bit */
+#define _PAGE_SPECIAL 0x040 /* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
- _PAGE_SWC | _PAGE_SWR)
-
-/* Six different types of pages. */
-#define _PAGE_TYPE_EMPTY 0x400
-#define _PAGE_TYPE_NONE 0x401
-#define _PAGE_TYPE_SWAP 0x403
-#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
-#define _PAGE_TYPE_RO 0x200
-#define _PAGE_TYPE_RW 0x000
-
-/*
- * Only four types for huge pages, using the invalid bit and protection bit
- * of a segment table entry.
- */
-#define _HPAGE_TYPE_EMPTY 0x020 /* _SEGMENT_ENTRY_INV */
-#define _HPAGE_TYPE_NONE 0x220
-#define _HPAGE_TYPE_RO 0x200 /* _SEGMENT_ENTRY_RO */
-#define _HPAGE_TYPE_RW 0x000
+ _PAGE_DIRTY | _PAGE_YOUNG)
/*
- * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
- * pte_none and pte_file to find out the pte type WITHOUT holding the page
- * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to
- * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
- * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
- * This change is done while holding the lock, but the intermediate step
- * of a previously valid pte with the hw invalid bit set can be observed by
- * handle_pte_fault. That makes it necessary that all valid pte types with
- * the hw invalid bit set must be distinguishable from the four pte types
- * empty, none, swap and file.
+ * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
+ * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
+ * is used to distinguish present from not-present ptes. It is changed only
+ * with the page table lock held.
+ *
+ * The following table gives the different possible bit combinations for
+ * the pte hardware and software bits in the last 12 bits of a pte:
*
- * irxt ipte irxt
- * _PAGE_TYPE_EMPTY 1000 -> 1000
- * _PAGE_TYPE_NONE 1001 -> 1001
- * _PAGE_TYPE_SWAP 1011 -> 1011
- * _PAGE_TYPE_FILE 11?1 -> 11?1
- * _PAGE_TYPE_RO 0100 -> 1100
- * _PAGE_TYPE_RW 0000 -> 1000
+ * 842100000000
+ * 000084210000
+ * 000000008421
+ * .IR...wrdytp
+ * empty .10...000000
+ * swap .10...xxxx10
+ * file .11...xxxxx0
+ * prot-none, clean, old .11...000001
+ * prot-none, clean, young .11...000101
+ * prot-none, dirty, old .10...001001
+ * prot-none, dirty, young .10...001101
+ * read-only, clean, old .11...010001
+ * read-only, clean, young .01...010101
+ * read-only, dirty, old .11...011001
+ * read-only, dirty, young .01...011101
+ * read-write, clean, old .11...110001
+ * read-write, clean, young .01...110101
+ * read-write, dirty, old .10...111001
+ * read-write, dirty, young .00...111101
*
- * pte_none is true for bits combinations 1000, 1010, 1100, 1110
- * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
- * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
+ * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
+ * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
+ * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
+ * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
*/
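
As a standalone illustration of these four tests, here is a minimal sketch using the mask equations from the comment above; the example_* helpers are hypothetical and operate on a raw pte value rather than the kernel's pte_t API:

	static inline int example_pte_present(unsigned long pte)
	{
		return (pte & 0x001) == 0x001;	/* _PAGE_PRESENT set */
	}

	static inline int example_pte_none(unsigned long pte)
	{
		return (pte & 0x603) == 0x400;	/* invalid set; protect, type, present clear */
	}

	static inline int example_pte_file(unsigned long pte)
	{
		return (pte & 0x601) == 0x600;	/* invalid + protect, not present */
	}

	static inline int example_pte_swap(unsigned long pte)
	{
		return (pte & 0x603) == 0x402;	/* invalid + type, not present */
	}

For instance, the "prot-none, clean, young" pattern from the table is 0x605, which satisfies only example_pte_present().
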
#ifndef CONFIG_64BIT
@@ -286,14 +280,25 @@ extern unsigned long MODULES_END;
#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
-#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
+#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_PROTECT
#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
+
+/*
+ * Segment table entry encoding (I = invalid, R = read-only bit):
+ * ..R...I.....
+ * prot-none ..1...1.....
+ * read-only ..1...0.....
+ * read-write ..0...0.....
+ * empty ..0...1.....
+ */
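+/*
+ * Equivalently, in terms of the bit macros above:
+ *	prot-none  = _SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_INVALID
+ *	read-only  = _SEGMENT_ENTRY_PROTECT
+ *	read-write = 0
+ *	empty      = _SEGMENT_ENTRY_INVALID
+ */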
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS 0xf0000000UL
@@ -303,9 +308,7 @@ extern unsigned long MODULES_END;
#define PGSTE_HC_BIT 0x00200000UL
#define PGSTE_GR_BIT 0x00040000UL
#define PGSTE_GC_BIT 0x00020000UL
-#define PGSTE_UR_BIT 0x00008000UL
-#define PGSTE_UC_BIT 0x00004000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x00002000UL /* IPTE notify bit */
+#define PGSTE_IN_BIT 0x00008000UL /* IPTE notify bit */
#else /* CONFIG_64BIT */
@@ -324,8 +327,8 @@ extern unsigned long MODULES_END;
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
-#define _REGION_ENTRY_RO 0x200 /* region protection bit */
-#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
+#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
+#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
@@ -333,29 +336,47 @@ extern unsigned long MODULES_END;
#define _REGION_ENTRY_LENGTH 0x03 /* region third length */
#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
-#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
-#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
-#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
-#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY (0)
-#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */
#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */
+#define _SEGMENT_ENTRY_SPLIT 0x001 /* THP splitting bit */
+#define _SEGMENT_ENTRY_YOUNG 0x002 /* SW segment young bit */
+#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_YOUNG
+
+/*
+ * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
+ * ..R...I...y.
+ * prot-none, old ..0...1...1.
+ * prot-none, young ..1...1...1.
+ * read-only, old ..1...1...0.
+ * read-only, young ..1...0...1.
+ * read-write, old ..0...1...0.
+ * read-write, young ..0...0...1.
+ * The segment table origin is used to distinguish empty (origin==0) from
+ * read-write, old segment table entries (origin!=0)
+ */
+
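
Note the ambiguity the last two comment lines resolve: "empty" and "read-write, old" both have I=1 and y=0, so only the segment table origin tells them apart. A sketch of the resulting test (hypothetical helper, mirroring the rewritten pmd_none() later in this patch):

	static inline int example_pmd_empty(unsigned long pmd)
	{
		/* invalid bit set and no origin or other bits at all */
		return pmd == _SEGMENT_ENTRY_INVALID;
	}
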
#define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */
-#define _SEGMENT_ENTRY_SPLIT (1UL << _SEGMENT_ENTRY_SPLIT_BIT)
/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
@@ -369,9 +390,7 @@ extern unsigned long MODULES_END;
#define PGSTE_HC_BIT 0x0020000000000000UL
#define PGSTE_GR_BIT 0x0004000000000000UL
#define PGSTE_GC_BIT 0x0002000000000000UL
-#define PGSTE_UR_BIT 0x0000800000000000UL
-#define PGSTE_UC_BIT 0x0000400000000000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x0000200000000000UL /* IPTE notify bit */
+#define PGSTE_IN_BIT 0x0000800000000000UL /* IPTE notify bit */
#endif /* CONFIG_64BIT */
@@ -386,14 +405,18 @@ extern unsigned long MODULES_END;
/*
* Page protection definitions.
*/
-#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
-#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
-#define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
-
-#define PAGE_KERNEL PAGE_RWC
-#define PAGE_SHARED PAGE_KERNEL
-#define PAGE_COPY PAGE_RO
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_INVALID | _PAGE_PROTECT)
+#define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_INVALID | _PAGE_PROTECT)
+
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_YOUNG | _PAGE_DIRTY)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_YOUNG | _PAGE_DIRTY)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
+ _PAGE_PROTECT)
/*
* On s390 the page table entry has an invalid bit and a read-only bit.
@@ -402,35 +425,31 @@ extern unsigned long MODULES_END;
*/
/*xwr*/
#define __P000 PAGE_NONE
-#define __P001 PAGE_RO
-#define __P010 PAGE_RO
-#define __P011 PAGE_RO
-#define __P100 PAGE_RO
-#define __P101 PAGE_RO
-#define __P110 PAGE_RO
-#define __P111 PAGE_RO
+#define __P001 PAGE_READ
+#define __P010 PAGE_READ
+#define __P011 PAGE_READ
+#define __P100 PAGE_READ
+#define __P101 PAGE_READ
+#define __P110 PAGE_READ
+#define __P111 PAGE_READ
#define __S000 PAGE_NONE
-#define __S001 PAGE_RO
-#define __S010 PAGE_RW
-#define __S011 PAGE_RW
-#define __S100 PAGE_RO
-#define __S101 PAGE_RO
-#define __S110 PAGE_RW
-#define __S111 PAGE_RW
+#define __S001 PAGE_READ
+#define __S010 PAGE_WRITE
+#define __S011 PAGE_WRITE
+#define __S100 PAGE_READ
+#define __S101 PAGE_READ
+#define __S110 PAGE_WRITE
+#define __S111 PAGE_WRITE
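+
+/*
+ * The generic mm code builds protection_map[] from these; the index is
+ * vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED). All private
+ * (__P) combinations stay read-only so that the first write faults and
+ * triggers copy-on-write (generic mm behaviour, not part of this patch).
+ */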
/*
* Segment entry (large page) protection definitions.
*/
-#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
-
-static inline int mm_exclusive(struct mm_struct *mm)
-{
- return likely(mm == current->active_mm &&
- atomic_read(&mm->context.attach_count) <= 1);
-}
+#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
+ _SEGMENT_ENTRY_NONE)
+#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_INVALID | \
+ _SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_INVALID)
static inline int mm_has_pgste(struct mm_struct *mm)
{
@@ -467,7 +486,7 @@ static inline int pgd_none(pgd_t pgd)
{
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
return 0;
- return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
+ return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}
static inline int pgd_bad(pgd_t pgd)
@@ -478,7 +497,7 @@ static inline int pgd_bad(pgd_t pgd)
* invalid for either table entry.
*/
unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+ ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
return (pgd_val(pgd) & mask) != 0;
}
@@ -494,7 +513,7 @@ static inline int pud_none(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
return 0;
- return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
+ return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}
static inline int pud_large(pud_t pud)
@@ -512,7 +531,7 @@ static inline int pud_bad(pud_t pud)
* invalid for either table entry.
*/
unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+ ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
return (pud_val(pud) & mask) != 0;
}
@@ -521,30 +540,36 @@ static inline int pud_bad(pud_t pud)
static inline int pmd_present(pmd_t pmd)
{
- unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
- return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
- !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
+ return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}
static inline int pmd_none(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
- !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
+ return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}
static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
- return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
#else
return 0;
#endif
}
+static inline int pmd_prot_none(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) &&
+ (pmd_val(pmd) & _SEGMENT_ENTRY_NONE);
+}
+
static inline int pmd_bad(pmd_t pmd)
{
- unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
- return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
+#ifdef CONFIG_64BIT
+ if (pmd_large(pmd))
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
+#endif
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
@@ -563,31 +588,40 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+ if (pmd_prot_none(pmd))
+ return 0;
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
}
static inline int pmd_young(pmd_t pmd)
{
- return 0;
+ int young = 0;
+#ifdef CONFIG_64BIT
+ if (pmd_prot_none(pmd))
+ young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
+ else
+ young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
+#endif
+ return young;
}
-static inline int pte_none(pte_t pte)
+static inline int pte_present(pte_t pte)
{
- return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
+ /* Bit pattern: (pte & 0x001) == 0x001 */
+ return (pte_val(pte) & _PAGE_PRESENT) != 0;
}
-static inline int pte_present(pte_t pte)
+static inline int pte_none(pte_t pte)
{
- unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
- return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
- (!(pte_val(pte) & _PAGE_INVALID) &&
- !(pte_val(pte) & _PAGE_SWT));
+ /* Bit pattern: pte == 0x400 */
+ return pte_val(pte) == _PAGE_INVALID;
}
static inline int pte_file(pte_t pte)
{
- unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
- return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
+ /* Bit pattern: (pte & 0x601) == 0x600 */
+ return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
+ == (_PAGE_INVALID | _PAGE_PROTECT);
}
static inline int pte_special(pte_t pte)
@@ -634,6 +668,15 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
#endif
}
+static inline pgste_t pgste_get(pte_t *ptep)
+{
+ unsigned long pgste = 0;
+#ifdef CONFIG_PGSTE
+ pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
+#endif
+ return __pgste(pgste);
+}
+
static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
@@ -644,33 +687,28 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste)
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
- unsigned long address, bits;
- unsigned char skey;
+ unsigned long address, bits, skey;
if (pte_val(*ptep) & _PAGE_INVALID)
return pgste;
address = pte_val(*ptep) & PAGE_MASK;
- skey = page_get_storage_key(address);
+ skey = (unsigned long) page_get_storage_key(address);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
- /* Clear page changed & referenced bit in the storage key */
- if (bits & _PAGE_CHANGED)
+ if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) {
+ /* Transfer dirty + referenced bit to host bits in pgste */
+ pgste_val(pgste) |= bits << 52;
page_set_storage_key(address, skey ^ bits, 0);
- else if (bits)
+ } else if (!(pgste_val(pgste) & PGSTE_HR_BIT) &&
+ (bits & _PAGE_REFERENCED)) {
+ /* Transfer referenced bit to host bit in pgste */
+ pgste_val(pgste) |= PGSTE_HR_BIT;
page_reset_referenced(address);
+ }
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */
- /* Get host changed & referenced bits from pgste */
- bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52;
- /* Transfer page changed & referenced bit to kvm user bits */
- pgste_val(pgste) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */
- /* Clear relevant host bits in pgste. */
- pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT);
- pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
/* Copy page access key and fetch protection bit to pgste */
- pgste_val(pgste) |=
- (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
- /* Transfer referenced bit to pte */
- pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+ pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
return pgste;
@@ -679,24 +717,11 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
- int young;
-
if (pte_val(*ptep) & _PAGE_INVALID)
return pgste;
/* Get referenced bit from storage key */
- young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
- if (young)
- pgste_val(pgste) |= PGSTE_GR_BIT;
- /* Get host referenced bit from pgste */
- if (pgste_val(pgste) & PGSTE_HR_BIT) {
- pgste_val(pgste) &= ~PGSTE_HR_BIT;
- young = 1;
- }
- /* Transfer referenced bit to kvm user bits and pte */
- if (young) {
- pgste_val(pgste) |= PGSTE_UR_BIT;
- pte_val(*ptep) |= _PAGE_SWR;
- }
+ if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK))
+ pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT;
#endif
return pgste;
}
@@ -723,13 +748,13 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
- if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+ if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
/*
* Without enhanced suppression-on-protection force
* the dirty bit on for all writable ptes.
*/
- pte_val(entry) |= _PAGE_SWC;
- pte_val(entry) &= ~_PAGE_RO;
+ pte_val(entry) |= _PAGE_DIRTY;
+ pte_val(entry) &= ~_PAGE_PROTECT;
}
*ptep = entry;
}
@@ -841,21 +866,17 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
*/
static inline int pte_write(pte_t pte)
{
- return (pte_val(pte) & _PAGE_SWW) != 0;
+ return (pte_val(pte) & _PAGE_WRITE) != 0;
}
static inline int pte_dirty(pte_t pte)
{
- return (pte_val(pte) & _PAGE_SWC) != 0;
+ return (pte_val(pte) & _PAGE_DIRTY) != 0;
}
static inline int pte_young(pte_t pte)
{
-#ifdef CONFIG_PGSTE
- if (pte_val(pte) & _PAGE_SWR)
- return 1;
-#endif
- return 0;
+ return (pte_val(pte) & _PAGE_YOUNG) != 0;
}
/*
@@ -880,12 +901,12 @@ static inline void pud_clear(pud_t *pud)
static inline void pmd_clear(pmd_t *pmdp)
{
- pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ pte_val(*ptep) = _PAGE_INVALID;
}
/*
@@ -896,55 +917,63 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
- if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
- pte_val(pte) &= ~_PAGE_RO;
+ /*
+ * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
+ * invalid bit set, clear it again for readable, young pages
+ */
+ if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
+ pte_val(pte) &= ~_PAGE_INVALID;
+ /*
+ * newprot for PAGE_READ and PAGE_WRITE has the page protection
+ * bit set, clear it again for writable, dirty pages
+ */
+ if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
+ pte_val(pte) &= ~_PAGE_PROTECT;
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
- pte_val(pte) &= ~_PAGE_SWW;
- /* Do not clobber _PAGE_TYPE_NONE pages! */
- if (!(pte_val(pte) & _PAGE_INVALID))
- pte_val(pte) |= _PAGE_RO;
+ pte_val(pte) &= ~_PAGE_WRITE;
+ pte_val(pte) |= _PAGE_PROTECT;
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
- pte_val(pte) |= _PAGE_SWW;
- if (pte_val(pte) & _PAGE_SWC)
- pte_val(pte) &= ~_PAGE_RO;
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_DIRTY)
+ pte_val(pte) &= ~_PAGE_PROTECT;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
- pte_val(pte) &= ~_PAGE_SWC;
- /* Do not clobber _PAGE_TYPE_NONE pages! */
- if (!(pte_val(pte) & _PAGE_INVALID))
- pte_val(pte) |= _PAGE_RO;
+ pte_val(pte) &= ~_PAGE_DIRTY;
+ pte_val(pte) |= _PAGE_PROTECT;
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
- pte_val(pte) |= _PAGE_SWC;
- if (pte_val(pte) & _PAGE_SWW)
- pte_val(pte) &= ~_PAGE_RO;
+ pte_val(pte) |= _PAGE_DIRTY;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) &= ~_PAGE_PROTECT;
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
-#ifdef CONFIG_PGSTE
- pte_val(pte) &= ~_PAGE_SWR;
-#endif
+ pte_val(pte) &= ~_PAGE_YOUNG;
+ pte_val(pte) |= _PAGE_INVALID;
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
+ pte_val(pte) |= _PAGE_YOUNG;
+ if (pte_val(pte) & _PAGE_READ)
+ pte_val(pte) &= ~_PAGE_INVALID;
return pte;
}
@@ -957,7 +986,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
- pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+ pte_val(pte) |= _PAGE_LARGE;
return pte;
}
#endif
@@ -974,8 +1003,8 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_update_all(ptep, pgste);
- dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
- pgste_val(pgste) &= ~PGSTE_UC_BIT;
+ dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT);
+ pgste_val(pgste) &= ~PGSTE_HC_BIT;
pgste_set_unlock(ptep, pgste);
return dirty;
}
@@ -994,59 +1023,75 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_update_young(ptep, pgste);
- young = !!(pgste_val(pgste) & PGSTE_UR_BIT);
- pgste_val(pgste) &= ~PGSTE_UR_BIT;
+ young = !!(pgste_val(pgste) & PGSTE_HR_BIT);
+ pgste_val(pgste) &= ~PGSTE_HR_BIT;
pgste_set_unlock(ptep, pgste);
}
return young;
}
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
+{
+ if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+#ifndef CONFIG_64BIT
+ /* pto must point to the start of the segment table */
+ pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
+#else
+ /* ipte in zarch mode can do the math */
+ pte_t *pto = ptep;
+#endif
+ asm volatile(
+ " ipte %2,%3"
+ : "=m" (*ptep) : "m" (*ptep),
+ "a" (pto), "a" (address));
+ }
+}
+
+static inline void ptep_flush_lazy(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ int active = (mm == current->active_mm) ? 1 : 0;
+
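+	/*
+	 * If the mm is attached to other CPUs the pte must be invalidated
+	 * with ipte right away; otherwise it is enough to note that a
+	 * flush is pending in mm->context.flush_mm.
+	 */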
+ if (atomic_read(&mm->context.attach_count) > active)
+ __ptep_ipte(address, ptep);
+ else
+ mm->context.flush_mm = 1;
+}
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pgste_t pgste;
pte_t pte;
+ int young;
if (mm_has_pgste(vma->vm_mm)) {
pgste = pgste_get_lock(ptep);
- pgste = pgste_update_young(ptep, pgste);
- pte = *ptep;
- *ptep = pte_mkold(pte);
- pgste_set_unlock(ptep, pgste);
- return pte_young(pte);
+ pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
}
- return 0;
+
+ pte = *ptep;
+ __ptep_ipte(addr, ptep);
+ young = pte_young(pte);
+ pte = pte_mkold(pte);
+
+ if (mm_has_pgste(vma->vm_mm)) {
+ pgste_set_pte(ptep, pte);
+ pgste_set_unlock(ptep, pgste);
+ } else
+ *ptep = pte;
+
+ return young;
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- /* No need to flush TLB
- * On s390 reference bits are in storage key and never in TLB
- * With virtualization we handle the reference bit, without we
- * we can simply return */
return ptep_test_and_clear_young(vma, address, ptep);
}
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
-{
- if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-#ifndef CONFIG_64BIT
- /* pto must point to the start of the segment table */
- pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-#else
- /* ipte in zarch mode can do the math */
- pte_t *pto = ptep;
-#endif
- asm volatile(
- " ipte %2,%3"
- : "=m" (*ptep) : "m" (*ptep),
- "a" (pto), "a" (address));
- }
-}
-
/*
* This is hard to understand. ptep_get_and_clear and ptep_clear_flush
* both clear the TLB for the unmapped pte. The reason is that
@@ -1067,16 +1112,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
pgste_t pgste;
pte_t pte;
- mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_ipte_notify(mm, address, ptep, pgste);
}
pte = *ptep;
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ ptep_flush_lazy(mm, address, ptep);
+ pte_val(*ptep) = _PAGE_INVALID;
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
@@ -1093,15 +1136,14 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
pgste_t pgste;
pte_t pte;
- mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste_ipte_notify(mm, address, ptep, pgste);
}
pte = *ptep;
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
+ ptep_flush_lazy(mm, address, ptep);
+ pte_val(*ptep) |= _PAGE_INVALID;
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
@@ -1117,7 +1159,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
pgste_t pgste;
if (mm_has_pgste(mm)) {
- pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+ pgste = pgste_get(ptep);
pgste_set_key(ptep, pgste, pte);
pgste_set_pte(ptep, pte);
pgste_set_unlock(ptep, pgste);
@@ -1139,7 +1181,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
pte = *ptep;
__ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ pte_val(*ptep) = _PAGE_INVALID;
if (mm_has_pgste(vma->vm_mm)) {
pgste = pgste_update_all(&pte, pgste);
@@ -1163,18 +1205,17 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
pgste_t pgste;
pte_t pte;
- if (mm_has_pgste(mm)) {
+ if (!full && mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
- if (!full)
- pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+ pgste = pgste_ipte_notify(mm, address, ptep, pgste);
}
pte = *ptep;
if (!full)
- __ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ ptep_flush_lazy(mm, address, ptep);
+ pte_val(*ptep) = _PAGE_INVALID;
- if (mm_has_pgste(mm)) {
+ if (!full && mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
pgste_set_unlock(ptep, pgste);
}
@@ -1189,14 +1230,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
pte_t pte = *ptep;
if (pte_write(pte)) {
- mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_ipte_notify(mm, address, ptep, pgste);
}
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
+ ptep_flush_lazy(mm, address, ptep);
pte = pte_wrprotect(pte);
if (mm_has_pgste(mm)) {
@@ -1240,7 +1279,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t __pte;
pte_val(__pte) = physpage + pgprot_val(pgprot);
- return __pte;
+ return pte_mkyoung(__pte);
}
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
@@ -1248,10 +1287,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
unsigned long physpage = page_to_phys(page);
pte_t __pte = mk_pte_phys(physpage, pgprot);
- if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
- pte_val(__pte) |= _PAGE_SWC;
- pte_val(__pte) &= ~_PAGE_RO;
- }
+ if (pte_write(__pte) && PageDirty(page))
+ __pte = pte_mkdirty(__pte);
return __pte;
}
@@ -1313,7 +1350,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
unsigned long sto = (unsigned long) pmdp -
pmd_index(address) * sizeof(pmd_t);
- if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+ if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
asm volatile(
" .insn rrf,0xb98e0000,%2,%3,0,0"
: "=m" (*pmdp)
@@ -1324,24 +1361,68 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
}
}
+static inline void __pmd_csp(pmd_t *pmdp)
+{
+ register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+ register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+ _SEGMENT_ENTRY_INVALID;
+ register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+ asm volatile(
+ " csp %1,%3"
+ : "=m" (*pmdp)
+ : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
/*
- * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+ * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
* Convert to segment table entry format.
*/
if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
return pgprot_val(SEGMENT_NONE);
- if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
- return pgprot_val(SEGMENT_RO);
- return pgprot_val(SEGMENT_RW);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+ return pgprot_val(SEGMENT_READ);
+ return pgprot_val(SEGMENT_WRITE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
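+	/* prot-none entries keep the young state in the protect bit */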
+ if (pmd_prot_none(pmd)) {
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ } else {
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
+ }
+#endif
+ return pmd;
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+ if (pmd_prot_none(pmd)) {
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+ } else {
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+ }
+#endif
+ return pmd;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
+ int young;
+
+ young = pmd_young(pmd);
pmd_val(pmd) &= _SEGMENT_CHG_MASK;
pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+ if (young)
+ pmd = pmd_mkyoung(pmd);
return pmd;
}
@@ -1349,18 +1430,29 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
pmd_t __pmd;
pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
- return __pmd;
+ return pmd_mkyoung(__pmd);
}
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
- /* Do not clobber _HPAGE_TYPE_NONE pages! */
- if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+ /* Do not clobber PROT_NONE segments! */
+ if (!pmd_prot_none(pmd))
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+static inline void pmdp_flush_lazy(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ int active = (mm == current->active_mm) ? 1 : 0;
+
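+	/* Same lazy-flush scheme as ptep_flush_lazy(), using idte */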
+ if ((atomic_read(&mm->context.attach_count) & 0xffff) > active)
+ __pmd_idte(address, pmdp);
+ else
+ mm->context.flush_mm = 1;
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PGTABLE_DEPOSIT
@@ -1378,7 +1470,7 @@ static inline int pmd_trans_splitting(pmd_t pmd)
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
- if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+ if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
pmd_val(entry) |= _SEGMENT_ENTRY_CO;
*pmdp = entry;
}
@@ -1391,7 +1483,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
- pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+ /* Do not clobber PROT_NONE segments! */
+ if (!pmd_prot_none(pmd))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
return pmd;
}
@@ -1401,50 +1495,16 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
return pmd;
}
-static inline pmd_t pmd_mkold(pmd_t pmd)
-{
- /* No referenced bit in the segment table entry. */
- return pmd;
-}
-
-static inline pmd_t pmd_mkyoung(pmd_t pmd)
-{
- /* No referenced bit in the segment table entry. */
- return pmd;
-}
-
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
- unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
- long tmp, rc;
- int counter;
+ pmd_t pmd;
- rc = 0;
- if (MACHINE_HAS_RRBM) {
- counter = PTRS_PER_PTE >> 6;
- asm volatile(
- "0: .insn rre,0xb9ae0000,%0,%3\n" /* rrbm */
- " ogr %1,%0\n"
- " la %3,0(%4,%3)\n"
- " brct %2,0b\n"
- : "=&d" (tmp), "+&d" (rc), "+d" (counter),
- "+a" (pmd_addr)
- : "a" (64 * 4096UL) : "cc");
- rc = !!rc;
- } else {
- counter = PTRS_PER_PTE;
- asm volatile(
- "0: rrbe 0,%2\n"
- " la %2,0(%3,%2)\n"
- " brc 12,1f\n"
- " lhi %0,1\n"
- "1: brct %1,0b\n"
- : "+d" (rc), "+d" (counter), "+a" (pmd_addr)
- : "a" (4096UL) : "cc");
- }
- return rc;
+ pmd = *pmdp;
+ __pmd_idte(address, pmdp);
+ *pmdp = pmd_mkold(pmd);
+ return pmd_young(pmd);
}
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
@@ -1510,10 +1570,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
* exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
* information in the lowcore.
- * Bit 21 and bit 22 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 30 and 31 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 21, 22, 30 and 31 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
* This leaves the bits 1-19 and bits 24-29 to store type and offset.
* We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
* plus 24 for the offset.
@@ -1527,10 +1585,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
* exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
* information in the lowcore.
- * Bit 53 and bit 54 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 62 and 63 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 53, 54, 62 and 63 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
* This leaves the bits 0-51 and bits 56-61 to store type and offset.
* We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
* plus 56 for the offset.
@@ -1547,7 +1603,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
offset &= __SWP_OFFSET_MASK;
- pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
+ pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
return pte;
}
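
For illustration, the inverse extraction of mk_swap_pte() would look as follows; the example_* helpers are hypothetical sketches (the header's actual __swp_type/__swp_offset macros are defined elsewhere in this file):

	static inline unsigned long example_swp_type(pte_t pte)
	{
		/* type sits in bits 2-6, as written by (type & 0x1f) << 2 */
		return (pte_val(pte) >> 2) & 0x1f;
	}

	static inline unsigned long example_swp_offset(pte_t pte)
	{
		/* offset bit 0 sits at pte bit 7, the rest at bits 12 and up */
		return ((pte_val(pte) >> 7) & 1UL) | ((pte_val(pte) >> 11) & ~1UL);
	}
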
@@ -1570,7 +1626,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define pgoff_to_pte(__off) \
((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
- | _PAGE_TYPE_FILE })
+ | _PAGE_INVALID | _PAGE_PROTECT })
#endif /* !__ASSEMBLY__ */