Diffstat (limited to 'arch/parisc/include/asm/pgalloc.h')
-rw-r--r--  arch/parisc/include/asm/pgalloc.h  76  ++++++------------------
 1 file changed, 18 insertions(+), 58 deletions(-)
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index a6482b2ce0ea..dda557085311 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -15,47 +15,23 @@
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
-/* Allocate the top level pgd (page directory)
- *
- * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
- * allocate the first pmd adjacent to the pgd. This means that we can
- * subtract a constant offset to get to it. The pmd and pgd sizes are
- * arranged so that a single pmd covers 4GB (giving a full 64-bit
- * process access to 8TB) so our lookups are effectively L2 for the
- * first 4GB of the kernel (i.e. for all ILP32 processes and all the
- * kernel for machines with under 4GB of memory) */
+/* Allocate the top level pgd (page directory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
- PGD_ALLOC_ORDER);
- pgd_t *actual_pgd = pgd;
+ pgd_t *pgd;
- if (likely(pgd != NULL)) {
- memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if CONFIG_PGTABLE_LEVELS == 3
- actual_pgd += PTRS_PER_PGD;
- /* Populate first pmd with allocated memory. We mark it
- * with PxD_FLAG_ATTACHED as a signal to the system that this
- * pmd entry may not be cleared. */
- set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
- PxD_FLAG_VALID |
- PxD_FLAG_ATTACHED)
- + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
- /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
- * a signal that this pmd may not be freed */
- set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
-#endif
- }
- spin_lock_init(pgd_spinlock(actual_pgd));
- return actual_pgd;
+ pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (unlikely(pgd == NULL))
+ return NULL;
+
+ memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+
+ return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
-#if CONFIG_PGTABLE_LEVELS == 3
- pgd -= PTRS_PER_PGD;
-#endif
- free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+ free_pages((unsigned long)pgd, PGD_ORDER);
}
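With the hybrid L2/L3 scheme gone, pgd_alloc() is a plain zeroed page-block
allocation and pgd_free() the matching release; no pmd is carved out adjacent
to the pgd and no PxD_FLAG_ATTACHED entry has to be preserved. A minimal
sketch of an equivalent formulation, assuming the usual gfp-flag semantics
(the patch itself keeps the explicit memset(), which has the same effect):

	static inline pgd_t *pgd_alloc(struct mm_struct *mm)
	{
		/* __GFP_ZERO hands back pages that are already cleared,
		 * so no separate memset() over PAGE_SIZE << PGD_ORDER
		 * is needed. */
		return (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 PGD_ORDER);
	}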
#if CONFIG_PGTABLE_LEVELS == 3
@@ -70,41 +46,25 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+ pmd_t *pmd;
+
+ pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+ if (likely(pmd))
+ memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
+ return pmd;
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
- /*
- * This is the permanent pmd attached to the pgd;
- * cannot free it.
- * Increment the counter to compensate for the decrement
- * done by generic mm code.
- */
- mm_inc_nr_pmds(mm);
- return;
- }
free_pages((unsigned long)pmd, PMD_ORDER);
}
-
#endif
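Note that GFP_PGTABLE_KERNEL is defined in include/asm-generic/pgalloc.h as
GFP_KERNEL | __GFP_ZERO, so pmd_alloc_one() above already receives zeroed
pages and the added memset() is defensive rather than strictly required. If
that definition holds, the function could shrink back to a one-liner:

	static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
					   unsigned long address)
	{
		/* GFP_PGTABLE_KERNEL includes __GFP_ZERO, so the
		 * PAGE_SIZE << PMD_ORDER block arrives pre-cleared. */
		return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
	}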
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
-#if CONFIG_PGTABLE_LEVELS == 3
- /* preserve the gateway marker if this is the beginning of
- * the permanent pmd */
- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
- set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
- PxD_FLAG_VALID |
- PxD_FLAG_ATTACHED)
- + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
- else
-#endif
- set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
- + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
+ set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+ + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}
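For reference, the PxD entry written by set_pmd() above keeps its flag bits
in the low bits and stores the physical address of the next-level table
shifted right by PxD_VALUE_SHIFT. A rough, self-contained illustration of
that round trip; the constants and helper names here are placeholders, not
the real definitions from arch/parisc/include/asm/page.h:

	/* Placeholder values chosen only to demonstrate the layout. */
	#define PxD_FLAG_PRESENT  (1 << 0)
	#define PxD_FLAG_VALID    (1 << 1)
	#define PxD_FLAG_MASK     0xful
	#define PxD_VALUE_SHIFT   4

	/* Hypothetical encode: flags in the low bits, address above them. */
	static unsigned long pxd_mk_entry(unsigned long paddr)
	{
		return (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (paddr >> PxD_VALUE_SHIFT);
	}

	/* Hypothetical decode: strip the flags, undo the shift. */
	static unsigned long pxd_entry_to_paddr(unsigned long entry)
	{
		return (entry & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT;
	}

The addition in place of a bitwise OR works because the table's physical
address is aligned well past PxD_VALUE_SHIFT, so the shifted value never
collides with the flag bits.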
#define pmd_populate(mm, pmd, pte_page) \