author		Kumar Gala <galak@kernel.crashing.org>	2009-10-17 03:48:40 +0400
committer	Kumar Gala <galak@kernel.crashing.org>	2010-10-14 09:55:14 +0400
commit		55fd766b5fad8240b7a6e994b5779a46d28f73d4 (patch)
tree		d00d9ddd5fb635d083e573d68675115489c46f19 /arch/powerpc/mm/tlb_nohash.c
parent		988cf86d4f0da4150e808300c145ba87c0aad02f (diff)
download	linux-55fd766b5fad8240b7a6e994b5779a46d28f73d4.tar.xz
powerpc/fsl-booke64: Use TLB CAMs to cover linear mapping on FSL 64-bit chips
Freescale parts typically have a TLB array for large mappings that we can bolt the linear mapping into. We utilize the code that already exists on PPC32 on the 64-bit side to set up the linear mapping so that it is covered by bolted TLB entries. We use a quarter of the variable-size TLB array for this purpose.

Additionally, we limit the amount of memory to what we can cover via bolted entries so we don't get secondary faults in the TLB miss handlers. We should fix this limitation in the future.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
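For context, the bolting itself is done by the existing 32-bit FSL Book-E map_mem_in_cams() helper that this patch reuses. The following is a minimal sketch of that logic, not the in-tree implementation: it assumes a settlbcam() helper that writes one bolted TLB1/CAM entry, and the size/alignment math is simplified (the real code also caps each entry at the largest page size the TLB1 array supports, as reported by TLB1CFG).

/*
 * Sketch: cover as much of RAM as possible with up to max_cam_idx
 * bolted CAM entries, returning how much was actually mapped.
 * Kernel context assumed (PAGE_OFFSET, memstart_addr, PAGE_KERNEL_X).
 */
static unsigned long map_mem_in_cams_sketch(unsigned long ram, int max_cam_idx)
{
	unsigned long virt = PAGE_OFFSET;
	phys_addr_t phys = memstart_addr;
	unsigned long mapped = 0;
	int i;

	for (i = 0; ram && i < max_cam_idx; i++) {
		/* Pick the largest power-of-4 size that both fits the
		 * remaining RAM and respects the current virtual and
		 * physical alignment.
		 */
		unsigned int camsize = __ilog2(ram) & ~1U;
		unsigned int align = __ffs(virt | phys) & ~1U;
		unsigned long cam_sz;

		if (camsize > align)
			camsize = align;
		cam_sz = 1UL << camsize;

		/* Hypothetical helper standing in for the 32-bit code's
		 * CAM setup: bolt a TLB1 entry mapping virt -> phys.
		 */
		settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);

		ram -= cam_sz;
		mapped += cam_sz;
		virt += cam_sz;
		phys += cam_sz;
	}

	return mapped;
}

The return value is what the patch below assigns back to linear_map_top: anything beyond it is trimmed via memblock_enforce_memory_limit() so the TLB miss handlers never have to fault in the linear mapping.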
Diffstat (limited to 'arch/powerpc/mm/tlb_nohash.c')
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 665189920762..61fe32a256da 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -541,6 +541,20 @@ static void __early_init_mmu(int boot_cpu)
*/
linear_map_top = memblock_end_of_DRAM();
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ unsigned int num_cams;
+
+ /* use a quarter of the TLBCAM for bolted linear map */
+ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+ linear_map_top = map_mem_in_cams(linear_map_top, num_cams);
+
+ /* limit memory so we don't have linear faults */
+ memblock_enforce_memory_limit(linear_map_top);
+ memblock_analyze();
+ }
+#endif
+
/* A sync won't hurt us after mucking around with
* the MMU configuration
*/