author     Richard Kuo <rkuo@codeaurora.org>  2012-12-01 00:53:56 +0400
committer  Richard Kuo <rkuo@codeaurora.org>  2013-05-01 04:40:24 +0400
commit     20f704b69af63bffbc8e70bcf21990318a8912f5 (patch)
tree       0c789768307de83e64e5d66b5d4d919d1fb57621 /arch/hexagon
parent     0357d2f2a8cde3069d800eb63c1ee41fed31e1b1 (diff)
download   linux-20f704b69af63bffbc8e70bcf21990318a8912f5.tar.xz
Hexagon: fix initial page table setup prior to jump to VA
Use the exact number of pages needed to be mapped pre-VA-jump, then map 896MB afterwards, which the arch mem init will fix up.

Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
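A minimal C sketch of the sizing arithmetic this patch sets up in head.S, for orientation only: the number of VA=PA entries is the kernel image size rounded up to whole 4MB pages, and the post-jump mapping is SEGTABLE_ENTRIES (0x0e0 = 224) segments of 4MB each, i.e. 896MB. The kernel_start/kernel_end values below are made-up stand-ins for the stext and _end link symbols; this is not the kernel code itself.

```c
#include <stdio.h>

#define SZ_4M            (1UL << 22)   /* one PDE maps a 4MB segment            */
#define SEGTABLE_ENTRIES 0x0e0         /* 224 entries, matching the head.S define */

int main(void)
{
	unsigned long kernel_start = 0x00100000UL;  /* stand-in for stext */
	unsigned long kernel_end   = 0x00834000UL;  /* stand-in for _end  */

	/* Identity (VA=PA) entries needed just to survive the jump to VA:
	 * kernel size rounded up to whole 4MB pages (r26 in the patch). */
	unsigned long va_eq_pa_entries =
		(kernel_end - kernel_start + SZ_4M - 1) >> 22;

	/* After the jump, 0x0e0 entries of 4MB each are mapped (896MB),
	 * which the arch mem init later fixes up. */
	unsigned long post_jump_mb = (SEGTABLE_ENTRIES * SZ_4M) >> 20;

	printf("VA=PA entries pre-jump: %lu\n", va_eq_pa_entries);
	printf("post-jump mapping:      %lu MB\n", post_jump_mb);
	return 0;
}
```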
Diffstat (limited to 'arch/hexagon')
-rw-r--r--  arch/hexagon/kernel/head.S | 85
1 file changed, 73 insertions, 12 deletions
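The patch also indexes the page directory by "PAGE_OFFSET >> (22 - 2)": each entry covers a 4MB segment (shift 22) and is 4 bytes wide (shift 2), so the combined shift turns a virtual address straight into a byte offset within the table. A small sketch of that arithmetic, assuming PAGE_OFFSET is 0xc0000000 as the patch's own comment about the "initial 0xc0000000 entries" implies:

```c
#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL   /* assumed kernel VA base, per the patch comment */

int main(void)
{
	unsigned long byte_offset = PAGE_OFFSET >> (22 - 2);  /* 0xc00 bytes     */
	unsigned long entry_index = PAGE_OFFSET >> 22;        /* 768th 4MB entry */

	printf("byte offset into page directory: %#lx\n", byte_offset);
	printf("PDE index (byte offset / 4):     %lu\n", entry_index);
	return 0;
}
```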
diff --git a/arch/hexagon/kernel/head.S b/arch/hexagon/kernel/head.S
index 477320c455f7..8e88d246ca69 100644
--- a/arch/hexagon/kernel/head.S
+++ b/arch/hexagon/kernel/head.S
@@ -25,6 +25,9 @@
#include <asm/mem-layout.h>
#include <asm/vm_mmu.h>
#include <asm/page.h>
+#include <asm/hexagon_vm.h>
+
+#define SEGTABLE_ENTRIES #0x0e0
__INIT
ENTRY(stext)
@@ -63,27 +66,73 @@ ENTRY(stext)
| __HEXAGON_C_WB_L2 << 6 \
| __HVM_PDE_S_4MB)
- r1 = pc
- r2.H = #0xffc0
- r2.L = #0x0000
- r1 = and(r1,r2) /* round PC to 4MB boundary */
+ /*
+ * Get number of VA=PA entries; only really needed for jump
+ * to hyperspace; gets blown away immediately after
+ */
+
+ {
+ r1.l = #LO(_end);
+ r2.l = #LO(stext);
+ r3 = #1;
+ }
+ {
+ r1.h = #HI(_end);
+ r2.h = #HI(stext);
+ r3 = asl(r3, #22);
+ }
+ {
+ r1 = sub(r1, r2);
+ r3 = add(r3, #-1);
+ } /* r1 = _end - stext */
+ r1 = add(r1, r3); /* + (4M-1) */
+ r26 = lsr(r1, #22); /* / 4M = # of entries */
+
+ r1 = r25;
+ r2.h = #0xffc0;
+ r2.l = #0x0000; /* round back down to 4MB boundary */
+ r1 = and(r1,r2);
r2 = lsr(r1, #22) /* 4MB page number */
r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
r0 = add(r0,r2) /* r0 = address of correct PTE */
r2 = #PTE_BITS
r1 = add(r1,r2) /* r1 = 4MB PTE for the first entry */
r2.h = #0x0040
- r2.l = #0x0000 /* 4MB */
- memw(r0 ++ #4) = r1
- r1 = add(r1, r2)
+ r2.l = #0x0000 /* 4MB increments */
+ loop0(1f,r26);
+1:
memw(r0 ++ #4) = r1
+ { r1 = add(r1, r2); } :endloop0
- r0 = r24
+ /* Also need to overwrite the initial 0xc0000000 entries */
+ /* PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift) */
+ R1.H = #HI(PAGE_OFFSET >> (22 - 2))
+ R1.L = #LO(PAGE_OFFSET >> (22 - 2))
+
+ r0 = add(r1, r24); /* advance to 0xc0000000 entry */
+ r1 = r25;
+ r2.h = #0xffc0;
+ r2.l = #0x0000; /* round back down to 4MB boundary */
+ r1 = and(r1,r2); /* for huge page */
+ r2 = #PTE_BITS
+ r1 = add(r1,r2);
+ r2.h = #0x0040
+ r2.l = #0x0000 /* 4MB increments */
+
+ loop0(1f,SEGTABLE_ENTRIES);
+1:
+ memw(r0 ++ #4) = r1;
+ { r1 = add(r1,r2); } :endloop0
+
+ r0 = r24;
/*
* The subroutine wrapper around the virtual instruction touches
* no memory, so we should be able to use it even here.
+ * Note that in this version, R1 and R2 get "clobbered"; see
+ * vm_ops.S
*/
+ r1 = #VM_TRANS_TYPE_TABLE
call __vmnewmap;
/* Jump into virtual address range. */
@@ -97,17 +146,29 @@ ENTRY(stext)
__head_s_vaddr_target:
/*
* Tear down VA=PA translation now that we are running
- * in the desgnated kernel segments.
+ * in kernel virtual space.
*/
r0 = #__HVM_PDE_S_INVALID
- r1 = r24
- loop0(1f,#0x100)
+
+ r1.h = #0xffc0;
+ r1.l = #0x0000;
+ r2 = r25; /* phys_offset */
+ r2 = and(r1,r2);
+
+ r1.l = #lo(swapper_pg_dir)
+ r1.h = #hi(swapper_pg_dir)
+ r2 = lsr(r2, #22) /* 4MB page number */
+ r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
+ r1 = add(r1,r2);
+ loop0(1f,r26)
+
1:
{
memw(R1 ++ #4) = R0
}:endloop0
r0 = r24
+ r1 = #VM_TRANS_TYPE_TABLE
call __vmnewmap
/* Go ahead and install the trap0 return so angel calls work */
@@ -150,7 +211,7 @@ __head_s_vaddr_target:
r2 = sub(r2,r0);
call memset;
- /* Set PHYS_OFFSET; should still be in R25 */
+ /* Set PHYS_OFFSET; should be in R25 */
#ifdef CONFIG_HEXAGON_PHYS_OFFSET
r0.l = #LO(__phys_offset);
r0.h = #HI(__phys_offset);