Diffstat (limited to 'arch/powerpc/include/asm/book3s/64/pgalloc.h')
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h  |  84
1 file changed, 47 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 4746bc68d446..391ed2c3b697 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/cpumask.h>
+#include <linux/kmemleak.h>
#include <linux/percpu.h>
struct vmemmap_backing {
@@ -42,7 +43,9 @@ extern struct kmem_cache *pgtable_cache[];
})
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
+extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
@@ -80,8 +83,24 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));
- memset(pgd, 0, PGD_TABLE_SIZE);
+ /*
+ * Don't scan the PGD for pointers; it contains references to PUDs,
+ * but those references are not full pointers and so can't be
+ * recognised by kmemleak.
+ */
+ kmemleak_no_scan(pgd);
+ /*
+ * With hugetlb, we don't clear the second half of the page table.
+ * If we share the same slab cache with the pmd or pud level table,
+ * we need to make sure we zero out the full table on alloc.
+ * With 4K pages we don't store the slot in the second half, so we
+ * don't need to do this for 4K.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
+ (H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
+ memset(pgd, 0, PGD_TABLE_SIZE);
+#endif
return pgd;
}
@@ -99,8 +118,19 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+ pud_t *pud;
+
+ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+ /*
+ * Tell kmemleak to ignore the PUD; that means don't scan it for
+ * pointers and don't consider it a leak. PUDs are typically only
+ * referred to by their PGD, but kmemleak is not able to recognise those
+ * as pointers, leading to false leak reports.
+ */
+ kmemleak_ignore(pud);
+
+ return pud;
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
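
Note: the two kmemleak annotations introduced above do different things.
kmemleak_no_scan() keeps the object in leak tracking but skips its contents
when scanning for pointers; kmemleak_ignore() additionally stops the object
itself from ever being reported as a leak. A minimal sketch of the pattern,
using a hypothetical allocation that is not part of this patch:

	#include <linux/slab.h>
	#include <linux/kmemleak.h>

	static void *alloc_table_sketch(void)
	{
		/* Hypothetical table allocation, for illustration only. */
		void *table = kmalloc(PAGE_SIZE, GFP_KERNEL);

		if (!table)
			return NULL;
		/*
		 * The table will hold encoded PFNs rather than kernel
		 * virtual addresses, so scanning it for pointers would
		 * only produce noise.
		 */
		kmemleak_no_scan(table);
		return table;
	}

kmemleak_ignore(table) would be used instead when, as with the PUDs above,
nothing kmemleak can scan will ever point at the object, so it would
otherwise show up as a false leak.
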
@@ -114,36 +144,35 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
- unsigned long address)
+ unsigned long address)
{
/*
* By now all the pud entries should be none entries. So go
* ahead and flush the page walk cache
*/
flush_tlb_pgtable(tlb, address);
- pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
+ pgtable_free_tlb(tlb, pud, PUD_INDEX);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+ return pmd_fragment_alloc(mm, addr);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+ pmd_fragment_free((unsigned long *)pmd);
}
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
- unsigned long address)
+ unsigned long address)
{
/*
* By now all the pmd entries should be none entries. So go
* ahead and flush the page walk cache
*/
flush_tlb_pgtable(tlb, address);
- return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
+ return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
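
Note: pmd_fragment_alloc()/pmd_fragment_free(), like the existing
pte_fragment_alloc(), let several PMD tables share one backing page instead
of each consuming a dedicated kmem_cache object. The real implementation
lives in arch/powerpc/mm and handles refcounting and locking; the sketch
below only illustrates the carve-a-page-into-fragments idea, and every name
in it is invented for the example:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Assumed fragment size for the sketch: four tables per page. */
	#define FRAG_SIZE	(PAGE_SIZE / 4)

	static char *frag_cursor;	/* next free fragment, if any */

	static void *fragment_alloc_sketch(gfp_t gfp)
	{
		void *ret;

		if (!frag_cursor) {
			/* Start carving up a fresh page. */
			frag_cursor = (char *)__get_free_page(gfp | __GFP_ZERO);
			if (!frag_cursor)
				return NULL;
		}
		ret = frag_cursor;
		frag_cursor += FRAG_SIZE;
		/* A cursor on a page boundary means the page is used up. */
		if (!((unsigned long)frag_cursor & (PAGE_SIZE - 1)))
			frag_cursor = NULL;
		return ret;
	}

The real code additionally serialises against concurrent allocators and
refcounts the backing page so individual fragments can be freed in any
order.
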
@@ -163,31 +192,6 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
return (pgtable_t)pmd_page_vaddr(pmd);
}
-#ifdef CONFIG_PPC_4K_PAGES
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
-{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
-{
- struct page *page;
- pte_t *pte;
-
- pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
- if (!pte)
- return NULL;
- page = virt_to_page(pte);
- if (!pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
- return pte;
-}
-#else /* if CONFIG_PPC_64K_PAGES */
-
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
@@ -199,7 +203,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
{
return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
-#endif
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
@@ -219,9 +222,16 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
* ahead and flush the page walk cache
*/
flush_tlb_pgtable(tlb, address);
- pgtable_free_tlb(tlb, table, 0);
+ pgtable_free_tlb(tlb, table, PTE_INDEX);
}
#define check_pgt_cache() do { } while (0)
+extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+static inline void update_page_count(int psize, long count)
+{
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ atomic_long_add(count, &direct_pages_count[psize]);
+}
+
#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
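
Note: update_page_count() maintains per-page-size counters,
direct_pages_count[], for pages mapped by the kernel's direct mapping,
apparently for /proc-style reporting (hence the CONFIG_PROC_FS guard).
Because the guard uses IS_ENABLED(CONFIG_PROC_FS) rather than #ifdef, the
atomic add is still type-checked but compiles away entirely when procfs is
disabled. A hypothetical call site, purely for illustration:

	static void map_one_16M_page_sketch(void)
	{
		/* ... create the 16MB mapping itself ... */

		/*
		 * MMU_PAGE_16M is a real powerpc page-size index; this
		 * call site is invented for the example.
		 */
		update_page_count(MMU_PAGE_16M, 1);
	}
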