author     Russell King <rmk+kernel@arm.linux.org.uk>  2016-01-12 16:41:03 +0300
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2016-01-12 16:41:03 +0300
commit     6660800fb7fd0f66faecb3c550fe59709220ade5 (patch)
tree       19f95c0579bc13ba8c1d326e5058330e2fd14d63 /arch/arm/mm
parent     598bcc6ea6ec4032d2ace8b1b43d11b4708af072 (diff)
parent     06312f44ad638c02e26c5f187c9ee8f94cbaa3a2 (diff)
Merge branch 'devel-stable' into for-linus
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/init.c    |   5
-rw-r--r--  arch/arm/mm/ioremap.c |   9
-rw-r--r--  arch/arm/mm/mmu.c     | 128
3 files changed, 100 insertions(+), 42 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7f8cd1b3557f..49bd08178008 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -192,7 +192,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
- return memblock_is_memory(__pfn_to_phys(pfn));
+ return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
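
The pfn_valid() hunk above is the consumer side of the MEMBLOCK_NOMAP attribute: RAM that firmware retains can stay on the memblock memory list without being treated as mapped, struct-page-backed memory. A minimal sketch of the changed semantics, not part of the commit; the region address is hypothetical:

#include <linux/memblock.h>
#include <linux/sizes.h>

static void __init nomap_semantics_example(void)
{
	phys_addr_t base = 0x80000000;		/* hypothetical firmware-owned RAM */

	memblock_add(base, SZ_1M);		/* the range is system RAM... */
	memblock_mark_nomap(base, SZ_1M);	/* ...but must stay out of the kernel map */

	/* Old predicate: true, since the range is in the memory list. */
	WARN_ON(!memblock_is_memory(base));

	/* New predicate behind pfn_valid(): false for NOMAP ranges. */
	WARN_ON(memblock_is_map_memory(base));
}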
@@ -433,6 +433,9 @@ static void __init free_highpages(void)
if (end <= max_low)
continue;
+ if (memblock_is_nomap(mem))
+ continue;
+
/* Truncate partial highmem entries */
if (start < max_low)
start = max_low;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0c81056c1dd7..66a978d05958 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -30,6 +30,7 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
+#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -469,3 +470,11 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
#endif
+
+/*
+ * Must be called after early_fixmap_init
+ */
+void __init early_ioremap_init(void)
+{
+ early_ioremap_setup();
+}
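
early_ioremap_init() hooks ARM into the generic early_ioremap machinery in mm/early_ioremap.c, giving boot code a way to map and inspect firmware tables through fixmap slots before the normal ioremap/vmalloc infrastructure exists. A sketch of the generic API this enables, usable only during early boot; the table address is a placeholder, not from the commit:

#include <asm/early_ioremap.h>

static void __init parse_fw_table(phys_addr_t table_phys)
{
	void __iomem *p;

	/* Fixmap-backed mapping, available before paging_init() finishes. */
	p = early_ioremap(table_phys, PAGE_SIZE);
	if (!p)
		return;
	/* ... read the table through p ... */
	early_iounmap(p, PAGE_SIZE);	/* every early_ioremap must be balanced */
}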
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index de9f8921e407..a87f6cc3fa2b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -390,7 +390,7 @@ void __init early_fixmap_init(void)
* The early fixmap range spans multiple pmds, for which
* we are not prepared:
*/
- BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+ BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
!= FIXADDR_TOP >> PMD_SHIFT);
pmd = fixmap_pmd(FIXADDR_TOP);
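
A note on the widened check: the fixmap enum now ends in __end_of_early_ioremap_region, which lies TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) slots beyond __end_of_permanent_fixed_addresses so that early_ioremap() can hand out mappings from the same region; see arch/arm/include/asm/fixmap.h after this series. The BUILD_BUG_ON therefore verifies that the enlarged range, early-ioremap slots included, still fits inside the single pmd populated here.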
@@ -724,30 +724,49 @@ static void __init *early_alloc(unsigned long sz)
return early_alloc_aligned(sz, sz);
}
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static void *__init late_alloc(unsigned long sz)
+{
+ void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
+
+ BUG_ON(!ptr);
+ return ptr;
+}
+
+static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr,
+ unsigned long prot,
+ void *(*alloc)(unsigned long sz))
{
if (pmd_none(*pmd)) {
- pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+ pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
__pmd_populate(pmd, __pa(pte), prot);
}
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
}
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
+ unsigned long prot)
+{
+ return pte_alloc(pmd, addr, prot, early_alloc);
+}
+
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
- const struct mem_type *type)
+ const struct mem_type *type,
+ void *(*alloc)(unsigned long sz),
+ bool ng)
{
- pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+ pte_t *pte = pte_alloc(pmd, addr, type->prot_l1, alloc);
do {
- set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
+ set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
+ ng ? PTE_EXT_NG : 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+ const struct mem_type *type, bool ng)
{
pmd_t *p = pmd;
@@ -765,7 +784,7 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
pmd++;
#endif
do {
- *pmd = __pmd(phys | type->prot_sect);
+ *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
phys += SECTION_SIZE;
} while (pmd++, addr += SECTION_SIZE, addr != end);
@@ -774,7 +793,8 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+ const struct mem_type *type,
+ void *(*alloc)(unsigned long sz), bool ng)
{
pmd_t *pmd = pmd_offset(pud, addr);
unsigned long next;
@@ -792,10 +812,10 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
*/
if (type->prot_sect &&
((addr | next | phys) & ~SECTION_MASK) == 0) {
- __map_init_section(pmd, addr, next, phys, type);
+ __map_init_section(pmd, addr, next, phys, type, ng);
} else {
alloc_init_pte(pmd, addr, next,
- __phys_to_pfn(phys), type);
+ __phys_to_pfn(phys), type, alloc, ng);
}
phys += next - addr;
@@ -805,21 +825,24 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+ const struct mem_type *type,
+ void *(*alloc)(unsigned long sz), bool ng)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
- alloc_init_pmd(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
#ifndef CONFIG_ARM_LPAE
-static void __init create_36bit_mapping(struct map_desc *md,
- const struct mem_type *type)
+static void __init create_36bit_mapping(struct mm_struct *mm,
+ struct map_desc *md,
+ const struct mem_type *type,
+ bool ng)
{
unsigned long addr, length, end;
phys_addr_t phys;
@@ -859,7 +882,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
*/
phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset(mm, addr);
end = addr + length;
do {
pud_t *pud = pud_offset(pgd, addr);
@@ -867,7 +890,8 @@ static void __init create_36bit_mapping(struct map_desc *md,
int i;
for (i = 0; i < 16; i++)
- *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+ *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
+ (ng ? PMD_SECT_nG : 0));
addr += SUPERSECTION_SIZE;
phys += SUPERSECTION_SIZE;
@@ -876,33 +900,15 @@ static void __init create_36bit_mapping(struct map_desc *md,
}
#endif /* !CONFIG_ARM_LPAE */
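
An aside on the supersection path that now takes the ng flag: on non-LPAE ARM, a supersection maps 16 MB and encodes physical address bits [35:32] of a 36-bit address in descriptor bits [23:20]; the architecture also requires the entry to be replicated across all 16 pmd slots it covers, hence the loop above. A worked example of the bit manipulation, assuming 4 KiB pages (PAGE_SHIFT == 12) and a hypothetical frame; not taken from the commit:

static void __init supersection_bits_example(void)
{
	phys_addr_t phys = 0x234000000ULL;	/* bit 33 set: a 36-bit address */
	unsigned long pfn = phys >> PAGE_SHIFT;	/* 0x234000 */
	u32 ext;

	/* pfn >> (32 - PAGE_SHIFT) recovers phys >> 32 (here 0x2); its low
	 * nibble goes into descriptor bits [23:20]. */
	ext = ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;
	BUG_ON(ext != 0x00200000);
}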
-/*
- * Create the page directory entries and any necessary
- * page tables for the mapping specified by `md'. We
- * are able to cope here with varying sizes and address
- * offsets, and we take full advantage of sections and
- * supersections.
- */
-static void __init create_mapping(struct map_desc *md)
+static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
+ void *(*alloc)(unsigned long sz),
+ bool ng)
{
unsigned long addr, length, end;
phys_addr_t phys;
const struct mem_type *type;
pgd_t *pgd;
- if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
- pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
- return;
- }
-
- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
- md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
- (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
- pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
- }
-
type = &mem_types[md->type];
#ifndef CONFIG_ARM_LPAE
@@ -910,7 +916,7 @@ static void __init create_mapping(struct map_desc *md)
* Catch 36-bit addresses
*/
if (md->pfn >= 0x100000) {
- create_36bit_mapping(md, type);
+ create_36bit_mapping(mm, md, type, ng);
return;
}
#endif
@@ -925,12 +931,12 @@ static void __init create_mapping(struct map_desc *md)
return;
}
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset(mm, addr);
end = addr + length;
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, type);
+ alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
phys += next - addr;
addr = next;
@@ -938,6 +944,43 @@ static void __init create_mapping(struct map_desc *md)
}
/*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'. We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections and
+ * supersections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+ if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+ pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ return;
+ }
+
+ if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+ (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+ pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ }
+
+ __create_mapping(&init_mm, md, early_alloc, false);
+}
+
+void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+ bool ng)
+{
+#ifdef CONFIG_ARM_LPAE
+ pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
+ if (WARN_ON(!pud))
+ return;
+ pmd_alloc(mm, pud, 0);
+#endif
+ __create_mapping(mm, md, late_alloc, ng);
+}
+
+/*
* Create the architecture specific mappings
*/
void __init iotable_init(struct map_desc *io_desc, int nr)
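
The new create_mapping_late() is the hook later patches build on; it appears aimed at the ARM EFI runtime-services support merged through this branch. It installs a map_desc into an arbitrary mm_struct using page allocations (late_alloc) rather than memblock, optionally with non-global entries. A minimal sketch of a caller; runtime_mm and the region parameters are placeholders, not from the commit:

static struct mm_struct runtime_mm;	/* hypothetical private address space */

static void __init map_runtime_region(phys_addr_t phys, unsigned long virt,
				      unsigned long size)
{
	struct map_desc map = {
		.virtual = virt,
		.pfn	 = __phys_to_pfn(phys),
		.length	 = size,
		.type	 = MT_DEVICE,
	};

	/* ng = true: PTE_EXT_NG/PMD_SECT_nG set, so the entries are
	 * per-ASID rather than global. */
	create_mapping_late(&runtime_mm, &map, true);
}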
@@ -1392,6 +1435,9 @@ static void __init map_lowmem(void)
phys_addr_t end = start + reg->size;
struct map_desc map;
+ if (memblock_is_nomap(reg))
+ continue;
+
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
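
The map_lowmem() hunk mirrors the free_highpages() change in init.c: regions flagged MEMBLOCK_NOMAP are skipped when the linear mapping is built. For reference, the predicate introduced alongside this series is a plain flag test on the region descriptor, sketched here after include/linux/memblock.h; producers set the flag with memblock_mark_nomap():

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}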