author     Linus Torvalds <torvalds@linux-foundation.org>  2019-03-05 22:28:25 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-05 22:28:25 +0300
commit     d9862cfbe2099deb83f0e9c1932c91f2d9c50464 (patch)
tree       7092ef41113269f30b5429868a9d161e171c746d /arch/mips/include
parent     8feed3efa8022107bcb3432ac3ec9917e078ae70 (diff)
parent     aeb669d41ffabb91b1542f1f802cb12a989fced0 (diff)
Merge tag 'mips_5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS updates from Paul Burton:

 - Support for the MIPSr6 MemoryMapID register & Global INValidate TLB
   (GINVT) instructions, allowing for more efficient TLB maintenance when
   running on a CPU such as the I6500 that supports these.

 - Enable huge page support for MIPS64r6.

 - Optimize post-DMA cache sync by removing that code entirely for kernel
   configurations in which we know it won't be needed.

 - The number of pages allocated for interrupt stacks is now calculated
   correctly, where before we would wastefully allocate too much memory in
   some configurations.

 - The ath79 platform migrates to devicetree.

 - The bcm47xx platform sees fixes for the Buffalo WHR-G54S board.

 - The ingenic/jz4740 platform gains support for appended devicetrees.

 - The cavium_octeon, lantiq, loongson32 & sgi-ip27 platforms all see
   cleanups as do various pieces of core architecture code.

* tag 'mips_5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (66 commits)
  MIPS: lantiq: Remove separate GPHY Firmware loader
  MIPS: ingenic: Add support for appended devicetree
  MIPS: SGI-IP27: rework HUB interrupts
  MIPS: SGI-IP27: do boot CPU init later
  MIPS: SGI-IP27: do xtalk scanning later
  MIPS: SGI-IP27: use pr_info/pr_emerg and pr_cont to fix output
  MIPS: SGI-IP27: clean up bridge access and header files
  MIPS: SGI-IP27: get rid of volatile and hubreg_t
  MIPS: irq: Allocate accurate order pages for irq stack
  MIPS: dma-noncoherent: Remove bogus condition in dma_sync_phys()
  MIPS: eBPF: Remove REG_32BIT_ZERO_EX
  MIPS: eBPF: Always return sign extended 32b values
  MIPS: CM: Fix indentation
  MIPS: BCM47XX: Fix/improve Buffalo WHR-G54S support
  MIPS: OCTEON: program rx/tx-delay always from DT
  MIPS: OCTEON: delete board-specific link status
  MIPS: OCTEON: don't lie about interface type of CN3005 board
  MIPS: OCTEON: warn if deprecated link status is being used
  MIPS: OCTEON: add fixed-link nodes to in-kernel device tree
  MIPS: Delete unused flush_cache_sigtramp()
  ...
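The MemoryMapID/GINVT work is the headline TLB-maintenance change. As a rough, illustrative sketch only (not the kernel's actual flush path), the pattern the new helpers enable looks like this — the helpers used (read_c0_memorymapid(), ginvt_mmid(), sync_ginv(), the hazard barriers) are the ones added by the headers in this diff, while the surrounding function is hypothetical:

/*
 * Illustrative only: flush every TLB entry belonging to one address space
 * on all coherent CPUs with a single broadcast invalidate, instead of
 * sending an IPI to each CPU.
 */
static void example_flush_mm_by_mmid(u32 mmid)
{
	u32 old_mmid = read_c0_memorymapid();

	write_c0_memorymapid(mmid);	/* target the victim address space */
	mtc0_tlbw_hazard();
	ginvt_mmid();			/* globalized invalidate, all coherent CPUs */
	sync_ginv();			/* wait for the invalidation to complete everywhere */
	write_c0_memorymapid(old_mmid);
	instruction_hazard();
}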
Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/Kbuild                        |   1
-rw-r--r--  arch/mips/include/asm/barrier.h                     |  19
-rw-r--r--  arch/mips/include/asm/cacheflush.h                  |   2
-rw-r--r--  arch/mips/include/asm/cmpxchg.h                     | 104
-rw-r--r--  arch/mips/include/asm/cpu-features.h                |  13
-rw-r--r--  arch/mips/include/asm/cpu.h                         |   1
-rw-r--r--  arch/mips/include/asm/ginvt.h                       |  56
-rw-r--r--  arch/mips/include/asm/irqflags.h                    |   2
-rw-r--r--  arch/mips/include/asm/mach-ath79/ath79.h            |   4
-rw-r--r--  arch/mips/include/asm/mach-ip27/irq.h               |  12
-rw-r--r--  arch/mips/include/asm/mach-ip27/mmzone.h            |   9
-rw-r--r--  arch/mips/include/asm/mach-loongson32/platform.h    |   4
-rw-r--r--  arch/mips/include/asm/mipsregs.h                    |  11
-rw-r--r--  arch/mips/include/asm/mmu.h                         |   6
-rw-r--r--  arch/mips/include/asm/mmu_context.h                 | 139
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper-board.h    |  12
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-smix-defs.h       | 276
-rw-r--r--  arch/mips/include/asm/pci/bridge.h                  | 206
-rw-r--r--  arch/mips/include/asm/pgtable.h                     |  51
-rw-r--r--  arch/mips/include/asm/smp-ops.h                     |   1
-rw-r--r--  arch/mips/include/asm/sn/addrs.h                    |  72
-rw-r--r--  arch/mips/include/asm/sn/arch.h                     |   2
-rw-r--r--  arch/mips/include/asm/sn/io.h                       |   2
-rw-r--r--  arch/mips/include/asm/sn/sn0/addrs.h                |   5
-rw-r--r--  arch/mips/include/asm/tlbflush.h                    |   5
25 files changed, 400 insertions, 615 deletions
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index f15d5db5dd67..87b86cdf126a 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -3,7 +3,6 @@ generated-y += syscall_table_32_o32.h
generated-y += syscall_table_64_n32.h
generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
-generic-$(CONFIG_GENERIC_CSUM) += checksum.h
generic-y += current.h
generic-y += device.h
generic-y += dma-contiguous.h
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index b7f6ac5e513c..b865e317a14f 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -105,6 +105,20 @@
*/
#define STYPE_SYNC_MB 0x10
+/*
+ * stype 0x14 - A completion barrier specific to global invalidations
+ *
+ * When a sync instruction of this type completes any preceding GINVI or GINVT
+ * operation has been globalized & completed on all coherent CPUs. Anything
+ * that the GINV* instruction should invalidate will have been invalidated on
+ * all coherent CPUs when this instruction completes. It is implementation
+ * specific whether the GINV* instructions themselves will ensure completion,
+ * or this sync type will.
+ *
+ * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3)
+ * this sync type also requires that previous SYNCI operations have completed.
+ */
+#define STYPE_GINV 0x14
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
@@ -258,6 +272,11 @@
#define loongson_llsc_mb() do { } while (0)
#endif
+static inline void sync_ginv(void)
+{
+ asm volatile("sync\t%0" :: "i"(STYPE_GINV));
+}
+
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 4812d1fed0c2..d687b40b9fbb 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -25,7 +25,6 @@
*
* MIPS specific flush operations:
*
- * - flush_cache_sigtramp() flush signal trampoline
* - flush_icache_all() flush the entire instruction cache
* - flush_data_cache_page() flushes a page from the data cache
* - __flush_icache_user_range(start, end) flushes range of user instructions
@@ -110,7 +109,6 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
-extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 638de0c25249..f345a873742d 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -36,6 +36,8 @@
*/
extern unsigned long __cmpxchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for cmpxchg");
+extern unsigned long __cmpxchg64_unsupported(void)
+ __compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false");
extern unsigned long __xchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for xchg");
@@ -204,12 +206,102 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
cmpxchg((ptr), (o), (n)); \
})
#else
-#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#ifndef CONFIG_SMP
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
-#endif
-#endif
+
+# include <asm-generic/cmpxchg-local.h>
+# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+# ifdef CONFIG_SMP
+
+static inline unsigned long __cmpxchg64(volatile void *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long tmp, ret;
+ unsigned long flags;
+
+ /*
+ * The assembly below has to combine 32 bit values into a 64 bit
+ * register, and split 64 bit values from one register into two. If we
+ * were to take an interrupt in the middle of this we'd only save the
+ * least significant 32 bits of each register & probably clobber the
+ * most significant 32 bits of the 64 bit values we're using. In order
+ * to avoid this we must disable interrupts.
+ */
+ local_irq_save(flags);
+
+ asm volatile(
+ " .set push \n"
+ " .set " MIPS_ISA_ARCH_LEVEL " \n"
+ /* Load 64 bits from ptr */
+ "1: lld %L0, %3 # __cmpxchg64 \n"
+ /*
+ * Split the 64 bit value we loaded into the 2 registers that hold the
+ * ret variable.
+ */
+ " dsra %M0, %L0, 32 \n"
+ " sll %L0, %L0, 0 \n"
+ /*
+ * Compare ret against old, breaking out of the loop if they don't
+ * match.
+ */
+ " bne %M0, %M4, 2f \n"
+ " bne %L0, %L4, 2f \n"
+ /*
+ * Combine the 32 bit halves from the 2 registers that hold the new
+ * variable into a single 64 bit register.
+ */
+# if MIPS_ISA_REV >= 2
+ " move %L1, %L5 \n"
+ " dins %L1, %M5, 32, 32 \n"
+# else
+ " dsll %L1, %L5, 32 \n"
+ " dsrl %L1, %L1, 32 \n"
+ " .set noat \n"
+ " dsll $at, %M5, 32 \n"
+ " or %L1, %L1, $at \n"
+ " .set at \n"
+# endif
+ /* Attempt to store new at ptr */
+ " scd %L1, %2 \n"
+ /* If we failed, loop! */
+ "\t" __scbeqz " %L1, 1b \n"
+ " .set pop \n"
+ "2: \n"
+ : "=&r"(ret),
+ "=&r"(tmp),
+ "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
+ : GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr),
+ "r" (old),
+ "r" (new)
+ : "memory");
+
+ local_irq_restore(flags);
+ return ret;
+}
+
+# define cmpxchg64(ptr, o, n) ({ \
+ unsigned long long __old = (__typeof__(*(ptr)))(o); \
+ unsigned long long __new = (__typeof__(*(ptr)))(n); \
+ __typeof__(*(ptr)) __res; \
+ \
+ /* \
+ * We can only use cmpxchg64 if we know that the CPU supports \
+ * 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported \
+ * will cause a build error unless cpu_has_64bits is a \
+ * compile-time constant 1. \
+ */ \
+ if (cpu_has_64bits && kernel_uses_llsc) \
+ __res = __cmpxchg64((ptr), __old, __new); \
+ else \
+ __res = __cmpxchg64_unsupported(); \
+ \
+ __res; \
+})
+
+# else /* !CONFIG_SMP */
+# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# endif /* !CONFIG_SMP */
+#endif /* !CONFIG_64BIT */
#undef __scbeqz
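For callers, the point of the new 32-bit-SMP cmpxchg64() is that a 64-bit compare-and-exchange becomes usable even where BITS_PER_LONG == 32, provided the CPU implements lld/scd. A minimal usage sketch (the function name is hypothetical; the pattern mirrors the pgtable.h hunk later in this diff):

/* Atomically set a flag in a 64-bit word, but only if the word is still zero. */
static inline void set_flag_if_clear(unsigned long long *val,
				     unsigned long long flag)
{
	cmpxchg64(val, 0, flag);	/* the store happens only when *val == 0 */
}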
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 701e525641b8..6998a9796499 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -591,6 +591,19 @@
#endif /* CONFIG_MIPS_MT_SMP */
/*
+ * We only enable MMID support for configurations which natively support 64 bit
+ * atomics because getting good performance from the allocator relies upon
+ * efficient atomic64_*() functions.
+ */
+#ifndef cpu_has_mmid
+# ifdef CONFIG_GENERIC_ATOMIC64
+# define cpu_has_mmid 0
+# else
+# define cpu_has_mmid __isa_ge_and_opt(6, MIPS_CPU_MMID)
+# endif
+#endif
+
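The dependency on native 64-bit atomics exists because the MMID allocator leans on atomic64_*() in hot paths. A hypothetical illustration (not the actual allocator) of the kind of operation that has to be cheap:

static atomic64_t example_mmid_version = ATOMIC64_INIT(0);

static u64 example_bump_version(void)
{
	/*
	 * One native LL/SC sequence here; with CONFIG_GENERIC_ATOMIC64 this
	 * would instead take a spinlock round-trip on every call.
	 */
	return atomic64_inc_return(&example_mmid_version);
}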
+/*
* Guest capabilities
*/
#ifndef cpu_guest_has_conf1
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 532b49b1dbb3..6ad7d3cabd91 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -422,6 +422,7 @@ enum cpu_type_enum {
MBIT_ULL(55) /* CPU shares FTLB entries with another */
#define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \
MBIT_ULL(56) /* CPU has perf counters implemented per TC (MIPSMT ASE) */
+#define MIPS_CPU_MMID MBIT_ULL(57) /* CPU supports MemoryMapIDs */
/*
* CPU ASE encodings
diff --git a/arch/mips/include/asm/ginvt.h b/arch/mips/include/asm/ginvt.h
new file mode 100644
index 000000000000..49c6dbe37338
--- /dev/null
+++ b/arch/mips/include/asm/ginvt.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MIPS_ASM_GINVT_H__
+#define __MIPS_ASM_GINVT_H__
+
+#include <asm/mipsregs.h>
+
+enum ginvt_type {
+ GINVT_FULL,
+ GINVT_VA,
+ GINVT_MMID,
+};
+
+#ifdef TOOLCHAIN_SUPPORTS_GINV
+# define _ASM_SET_GINV ".set ginv\n"
+#else
+_ASM_MACRO_1R1I(ginvt, rs, type,
+ _ASM_INSN_IF_MIPS(0x7c0000bd | (__rs << 21) | (\\type << 8))
+ _ASM_INSN32_IF_MM(0x0000717c | (__rs << 16) | (\\type << 9)));
+# define _ASM_SET_GINV
+#endif
+
+static inline void ginvt(unsigned long addr, enum ginvt_type type)
+{
+ asm volatile(
+ ".set push\n"
+ _ASM_SET_GINV
+ " ginvt %0, %1\n"
+ ".set pop"
+ : /* no outputs */
+ : "r"(addr), "i"(type)
+ : "memory");
+}
+
+static inline void ginvt_full(void)
+{
+ ginvt(0, GINVT_FULL);
+}
+
+static inline void ginvt_va(unsigned long addr)
+{
+ addr &= PAGE_MASK << 1;
+ ginvt(addr, GINVT_VA);
+}
+
+static inline void ginvt_mmid(void)
+{
+ ginvt(0, GINVT_MMID);
+}
+
+static inline void ginvt_va_mmid(unsigned long addr)
+{
+ addr &= PAGE_MASK << 1;
+ ginvt(addr, GINVT_VA | GINVT_MMID);
+}
+
+#endif /* __MIPS_ASM_GINVT_H__ */
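A minimal usage sketch for the new helpers, assuming the MemoryMapID register already holds the MMID of the address space being targeted (sync_ginv() comes from the <asm/barrier.h> change earlier in this diff; the function name is illustrative):

static inline void example_flush_one_page(unsigned long addr)
{
	ginvt_va_mmid(addr);	/* invalidate entries matching VA + current MMID */
	sync_ginv();		/* stype 0x14 barrier: wait for global completion */
}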
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9d3610be2323..f0b862a83816 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -41,7 +41,7 @@ static inline unsigned long arch_local_irq_save(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_LOONGSON3)
+#if defined(CONFIG_CPU_LOONGSON3) || defined (CONFIG_CPU_LOONGSON1)
" mfc0 %[flags], $12 \n"
" di \n"
#else
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 73dcd63b8243..47e8827e9564 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -178,8 +178,4 @@ static inline u32 ath79_reset_rr(unsigned reg)
void ath79_device_reset_set(u32 mask);
void ath79_device_reset_clear(u32 mask);
-void ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3);
-void ath79_misc_irq_init(void __iomem *regs, int irq,
- int irq_base, bool is_ar71xx);
-
#endif /* __ASM_MACH_ATH79_H */
diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h
index b0b7261ff3ad..fd91c58aaf7d 100644
--- a/arch/mips/include/asm/mach-ip27/irq.h
+++ b/arch/mips/include/asm/mach-ip27/irq.h
@@ -10,13 +10,15 @@
#ifndef __ASM_MACH_IP27_IRQ_H
#define __ASM_MACH_IP27_IRQ_H
-/*
- * A hardwired interrupt number is completely stupid for this system - a
- * large configuration might have thousands if not tenthousands of
- * interrupts.
- */
#define NR_IRQS 256
#include_next <irq.h>
+#define IP27_HUB_PEND0_IRQ (MIPS_CPU_IRQ_BASE + 2)
+#define IP27_HUB_PEND1_IRQ (MIPS_CPU_IRQ_BASE + 3)
+#define IP27_RT_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 4)
+
+#define IP27_HUB_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
+#define IP27_HUB_IRQ_COUNT 128
+
#endif /* __ASM_MACH_IP27_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index 2ed3094dee07..1cd6a23a84f2 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -8,20 +8,11 @@
#define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr))
-#define LEVELS_PER_SLICE 128
-
-struct slice_data {
- unsigned long irq_enable_mask[2];
- int level_to_irq[LEVELS_PER_SLICE];
-};
-
struct hub_data {
kern_vars_t kern_vars;
DECLARE_BITMAP(h_bigwin_used, HUB_NUM_BIG_WINDOW);
cpumask_t h_cpus;
unsigned long slice_map;
- unsigned long irq_alloc_mask[2];
- struct slice_data slice[2];
};
struct node_data {
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
index 8f8fa43ba095..15d1de2300fe 100644
--- a/arch/mips/include/asm/mach-loongson32/platform.h
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -17,19 +17,15 @@
extern struct platform_device ls1x_uart_pdev;
extern struct platform_device ls1x_cpufreq_pdev;
-extern struct platform_device ls1x_dma_pdev;
extern struct platform_device ls1x_eth0_pdev;
extern struct platform_device ls1x_eth1_pdev;
extern struct platform_device ls1x_ehci_pdev;
extern struct platform_device ls1x_gpio0_pdev;
extern struct platform_device ls1x_gpio1_pdev;
-extern struct platform_device ls1x_nand_pdev;
extern struct platform_device ls1x_rtc_pdev;
extern struct platform_device ls1x_wdt_pdev;
void __init ls1x_clk_init(void);
-void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata);
-void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata);
void __init ls1x_rtc_set_extclk(struct platform_device *pdev);
void __init ls1x_serial_set_uartclk(struct platform_device *pdev);
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 402b80af91aa..1e6966e8527e 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -667,6 +667,7 @@
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
#define MIPS_CONF5_CA2 (_ULCAST_(1) << 14)
+#define MIPS_CONF5_MI (_ULCAST_(1) << 17)
#define MIPS_CONF5_CRCP (_ULCAST_(1) << 18)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
@@ -1247,6 +1248,13 @@ __asm__(".macro parse_r var r\n\t"
ENC \
".endm")
+/* Instructions with 1 register operand & 1 immediate operand */
+#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #I2 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ ENC \
+ ".endm")
+
/* Instructions with 2 register operands */
#define _ASM_MACRO_2R(OP, R1, R2, ENC) \
__asm__(".macro " #OP " " #R1 ", " #R2 "\n\t" \
@@ -1603,6 +1611,9 @@ do { \
#define read_c0_xcontextconfig() __read_ulong_c0_register($4, 3)
#define write_c0_xcontextconfig(val) __write_ulong_c0_register($4, 3, val)
+#define read_c0_memorymapid() __read_32bit_c0_register($4, 5)
+#define write_c0_memorymapid(val) __write_32bit_c0_register($4, 5, val)
+
#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 88a108ce62c1..5df0238f639b 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -7,7 +7,11 @@
#include <linux/wait.h>
typedef struct {
- u64 asid[NR_CPUS];
+ union {
+ u64 asid[NR_CPUS];
+ atomic64_t mmid;
+ };
+
void *vdso;
/* lock to be held whilst modifying fp_bd_emupage_allocmap */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index a589585be21b..cddead91acd4 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -17,8 +17,10 @@
#include <linux/smp.h>
#include <linux/slab.h>
+#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/dsemul.h>
+#include <asm/ginvt.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
@@ -73,6 +75,19 @@ extern unsigned long pgd_current[];
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
/*
+ * The ginvt instruction will invalidate wired entries when its type field
+ * targets anything other than the entire TLB. That means that if we were to
+ * allow the kernel to create wired entries with the MMID of current->active_mm
+ * then those wired entries could be invalidated when we later use ginvt to
+ * invalidate TLB entries with that MMID.
+ *
+ * In order to prevent ginvt from trashing wired entries, we reserve one MMID
+ * for use by the kernel when creating wired entries. This MMID will never be
+ * assigned to a struct mm, and we'll never target it with a ginvt instruction.
+ */
+#define MMID_KERNEL_WIRED 0
+
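A sketch of how the reservation is meant to be used — the helper and the wired-entry step are illustrative, not code from this series:

static inline void example_add_kernel_wired_entry(void)
{
	u32 old_mmid = read_c0_memorymapid();

	write_c0_memorymapid(MMID_KERNEL_WIRED);
	mtc0_tlbw_hazard();
	/* ... write the wired TLB entry here (e.g. via add_wired_entry()) ... */
	write_c0_memorymapid(old_mmid);
}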
+/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
*/
@@ -88,7 +103,23 @@ static inline u64 asid_first_version(unsigned int cpu)
return ~asid_version_mask(cpu) + 1;
}
-#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
+static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
+{
+ if (cpu_has_mmid)
+ return atomic64_read(&mm->context.mmid);
+
+ return mm->context.asid[cpu];
+}
+
+static inline void set_cpu_context(unsigned int cpu,
+ struct mm_struct *mm, u64 ctx)
+{
+ if (cpu_has_mmid)
+ atomic64_set(&mm->context.mmid, ctx);
+ else
+ mm->context.asid[cpu] = ctx;
+}
+
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm) \
(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
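As a worked illustration of the split between hardware ASID bits and the software version extension (the numbers assume a hypothetical 8-bit hardware ASID):

static inline void example_asid_split(void)
{
	u64 ctx = 0x305;			/* generation 3, hardware ASID 0x05 */
	u64 hw_asid = ctx & 0xff;		/* what cpu_asid() hands to EntryHi */
	u64 generation = ctx & ~0xffULL;	/* compared against asid_cache(cpu) */

	(void)hw_asid;
	(void)generation;
}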
@@ -97,21 +128,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-
-/* Normal, classic MIPS get_new_mmu_context */
-static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
-{
- u64 asid = asid_cache(cpu);
-
- if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
- local_flush_tlb_all(); /* start new asid cycle */
- }
-
- cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
+extern void get_new_mmu_context(struct mm_struct *mm);
+extern void check_mmu_context(struct mm_struct *mm);
+extern void check_switch_mmu_context(struct mm_struct *mm);
/*
* Initialize the context related info for a new mm_struct
@@ -122,8 +141,12 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;
- for_each_possible_cpu(i)
- cpu_context(i, mm) = 0;
+ if (cpu_has_mmid) {
+ set_cpu_context(0, mm, 0);
+ } else {
+ for_each_possible_cpu(i)
+ set_cpu_context(i, mm, 0);
+ }
mm->context.bd_emupage_allocmap = NULL;
spin_lock_init(&mm->context.bd_emupage_lock);
@@ -140,11 +163,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_save(flags);
htw_stop();
- /* Check if our ASID is of an older version and thus invalid */
- if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
- get_new_mmu_context(next, cpu);
- write_c0_entryhi(cpu_asid(cpu, next));
- TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+ check_switch_mmu_context(next);
/*
* Mark current->active_mm as not "active" anymore.
@@ -166,55 +185,55 @@ static inline void destroy_context(struct mm_struct *mm)
dsemul_mm_cleanup(mm);
}
+#define activate_mm(prev, next) switch_mm(prev, next, current)
#define deactivate_mm(tsk, mm) do { } while (0)
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
- unsigned long flags;
- unsigned int cpu = smp_processor_id();
-
- local_irq_save(flags);
-
- htw_stop();
- /* Unconditionally get a new ASID. */
- get_new_mmu_context(next, cpu);
-
- write_c0_entryhi(cpu_asid(cpu, next));
- TLBMISS_HANDLER_SETUP_PGD(next->pgd);
-
- /* mark mmu ownership change */
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
- htw_start();
-
- local_irq_restore(flags);
-}
-
-/*
- * If mm is currently active_mm, we can't really drop it. Instead,
- * we will get a new one for it.
- */
static inline void
-drop_mmu_context(struct mm_struct *mm, unsigned cpu)
+drop_mmu_context(struct mm_struct *mm)
{
unsigned long flags;
+ unsigned int cpu;
+ u32 old_mmid;
+ u64 ctx;
local_irq_save(flags);
- htw_stop();
- if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
- get_new_mmu_context(mm, cpu);
+ cpu = smp_processor_id();
+ ctx = cpu_context(cpu, mm);
+
+ if (!ctx) {
+ /* no-op */
+ } else if (cpu_has_mmid) {
+ /*
+ * Globally invalidating TLB entries associated with the MMID
+ * is pretty cheap using the GINVT instruction, so we'll do
+ * that rather than incur the overhead of allocating a new
+ * MMID. The latter would be especially difficult since MMIDs
+ * are global & other CPUs may be actively using ctx.
+ */
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
+ mtc0_tlbw_hazard();
+ ginvt_mmid();
+ sync_ginv();
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ /*
+ * mm is currently active, so we can't really drop it.
+ * Instead we bump the ASID.
+ */
+ htw_stop();
+ get_new_mmu_context(mm);
write_c0_entryhi(cpu_asid(cpu, mm));
+ htw_start();
} else {
/* will get a new context next time */
- cpu_context(cpu, mm) = 0;
+ set_cpu_context(cpu, mm, 0);
}
- htw_start();
+
local_irq_restore(flags);
}
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index b4d19c21b62c..d7fdcf0a0088 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -119,18 +119,6 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
extern int __cvmx_helper_board_interface_probe(int interface,
int supported_ports);
-/**
- * Enable packet input/output from the hardware. This function is
- * called after by cvmx_helper_packet_hardware_enable() to
- * perform board specific initialization. For most boards
- * nothing is needed.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_board_hardware_enable(int interface);
-
enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void);
#endif /* __CVMX_HELPER_BOARD_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
deleted file mode 100644
index 7a928230b0c0..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-smix-defs.h
+++ /dev/null
@@ -1,276 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_SMIX_DEFS_H__
-#define __CVMX_SMIX_DEFS_H__
-
-static inline uint64_t CVMX_SMIX_CLK(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003818ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
-}
-
-static inline uint64_t CVMX_SMIX_CMD(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003800ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
-}
-
-static inline uint64_t CVMX_SMIX_EN(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003820ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
-}
-
-static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003810ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
-}
-
-static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003808ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
-}
-
-union cvmx_smix_clk {
- uint64_t u64;
- struct cvmx_smix_clk_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_25_63:39;
- uint64_t mode:1;
- uint64_t reserved_21_23:3;
- uint64_t sample_hi:5;
- uint64_t sample_mode:1;
- uint64_t reserved_14_14:1;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
-#else
- uint64_t phase:8;
- uint64_t sample:4;
- uint64_t preamble:1;
- uint64_t clk_idle:1;
- uint64_t reserved_14_14:1;
- uint64_t sample_mode:1;
- uint64_t sample_hi:5;
- uint64_t reserved_21_23:3;
- uint64_t mode:1;
- uint64_t reserved_25_63:39;
-#endif
- } s;
- struct cvmx_smix_clk_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_21_63:43;
- uint64_t sample_hi:5;
- uint64_t sample_mode:1;
- uint64_t reserved_14_14:1;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
-#else
- uint64_t phase:8;
- uint64_t sample:4;
- uint64_t preamble:1;
- uint64_t clk_idle:1;
- uint64_t reserved_14_14:1;
- uint64_t sample_mode:1;
- uint64_t sample_hi:5;
- uint64_t reserved_21_63:43;
-#endif
- } cn30xx;
-};
-
-union cvmx_smix_cmd {
- uint64_t u64;
- struct cvmx_smix_cmd_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t phy_op:2;
- uint64_t reserved_13_15:3;
- uint64_t phy_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t reg_adr:5;
-#else
- uint64_t reg_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t phy_adr:5;
- uint64_t reserved_13_15:3;
- uint64_t phy_op:2;
- uint64_t reserved_18_63:46;
-#endif
- } s;
- struct cvmx_smix_cmd_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t phy_op:1;
- uint64_t reserved_13_15:3;
- uint64_t phy_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t reg_adr:5;
-#else
- uint64_t reg_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t phy_adr:5;
- uint64_t reserved_13_15:3;
- uint64_t phy_op:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn30xx;
-};
-
-union cvmx_smix_en {
- uint64_t u64;
- struct cvmx_smix_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t en:1;
-#else
- uint64_t en:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
-};
-
-union cvmx_smix_rd_dat {
- uint64_t u64;
- struct cvmx_smix_rd_dat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t pending:1;
- uint64_t val:1;
- uint64_t dat:16;
-#else
- uint64_t dat:16;
- uint64_t val:1;
- uint64_t pending:1;
- uint64_t reserved_18_63:46;
-#endif
- } s;
-};
-
-union cvmx_smix_wr_dat {
- uint64_t u64;
- struct cvmx_smix_wr_dat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t pending:1;
- uint64_t val:1;
- uint64_t dat:16;
-#else
- uint64_t dat:16;
- uint64_t val:1;
- uint64_t pending:1;
- uint64_t reserved_18_63:46;
-#endif
- } s;
-};
-
-#endif
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 3206245d1ed6..23574c27eb40 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -45,18 +45,21 @@
#ifndef __ASSEMBLY__
-/*
- * All accesses to bridge hardware registers must be done
- * using 32-bit loads and stores.
- */
-typedef u32 bridgereg_t;
+#define ATE_V 0x01
+#define ATE_CO 0x02
+#define ATE_PREC 0x04
+#define ATE_PREF 0x08
+#define ATE_BAR 0x10
+
+#define ATE_PFNSHIFT 12
+#define ATE_TIDSHIFT 8
+#define ATE_RMFSHIFT 48
-typedef u64 bridge_ate_t;
+#define mkate(xaddr, xid, attr) (((xaddr) & 0x0000fffffffff000ULL) | \
+ ((xid)<<ATE_TIDSHIFT) | \
+ (attr))
-/* pointers to bridge ATEs
- * are always "pointer to volatile"
- */
-typedef volatile bridge_ate_t *bridge_ate_p;
+#define BRIDGE_INTERNAL_ATES 128
/*
* It is generally preferred that hardware registers on the bridge
@@ -65,7 +68,7 @@ typedef volatile bridge_ate_t *bridge_ate_p;
* Generated from Bridge spec dated 04oct95
*/
-typedef volatile struct bridge_s {
+struct bridge_regs {
/* Local Registers 0x000000-0x00FFFF */
/* standard widget configuration 0x000000-0x000057 */
@@ -86,105 +89,105 @@ typedef volatile struct bridge_s {
#define b_wid_tflush b_widget.w_tflush
/* bridge-specific widget configuration 0x000058-0x00007F */
- bridgereg_t _pad_000058;
- bridgereg_t b_wid_aux_err; /* 0x00005C */
- bridgereg_t _pad_000060;
- bridgereg_t b_wid_resp_upper; /* 0x000064 */
- bridgereg_t _pad_000068;
- bridgereg_t b_wid_resp_lower; /* 0x00006C */
- bridgereg_t _pad_000070;
- bridgereg_t b_wid_tst_pin_ctrl; /* 0x000074 */
- bridgereg_t _pad_000078[2];
+ u32 _pad_000058;
+ u32 b_wid_aux_err; /* 0x00005C */
+ u32 _pad_000060;
+ u32 b_wid_resp_upper; /* 0x000064 */
+ u32 _pad_000068;
+ u32 b_wid_resp_lower; /* 0x00006C */
+ u32 _pad_000070;
+ u32 b_wid_tst_pin_ctrl; /* 0x000074 */
+ u32 _pad_000078[2];
/* PMU & Map 0x000080-0x00008F */
- bridgereg_t _pad_000080;
- bridgereg_t b_dir_map; /* 0x000084 */
- bridgereg_t _pad_000088[2];
+ u32 _pad_000080;
+ u32 b_dir_map; /* 0x000084 */
+ u32 _pad_000088[2];
/* SSRAM 0x000090-0x00009F */
- bridgereg_t _pad_000090;
- bridgereg_t b_ram_perr; /* 0x000094 */
- bridgereg_t _pad_000098[2];
+ u32 _pad_000090;
+ u32 b_ram_perr; /* 0x000094 */
+ u32 _pad_000098[2];
/* Arbitration 0x0000A0-0x0000AF */
- bridgereg_t _pad_0000A0;
- bridgereg_t b_arb; /* 0x0000A4 */
- bridgereg_t _pad_0000A8[2];
+ u32 _pad_0000A0;
+ u32 b_arb; /* 0x0000A4 */
+ u32 _pad_0000A8[2];
/* Number In A Can 0x0000B0-0x0000BF */
- bridgereg_t _pad_0000B0;
- bridgereg_t b_nic; /* 0x0000B4 */
- bridgereg_t _pad_0000B8[2];
+ u32 _pad_0000B0;
+ u32 b_nic; /* 0x0000B4 */
+ u32 _pad_0000B8[2];
/* PCI/GIO 0x0000C0-0x0000FF */
- bridgereg_t _pad_0000C0;
- bridgereg_t b_bus_timeout; /* 0x0000C4 */
+ u32 _pad_0000C0;
+ u32 b_bus_timeout; /* 0x0000C4 */
#define b_pci_bus_timeout b_bus_timeout
- bridgereg_t _pad_0000C8;
- bridgereg_t b_pci_cfg; /* 0x0000CC */
- bridgereg_t _pad_0000D0;
- bridgereg_t b_pci_err_upper; /* 0x0000D4 */
- bridgereg_t _pad_0000D8;
- bridgereg_t b_pci_err_lower; /* 0x0000DC */
- bridgereg_t _pad_0000E0[8];
+ u32 _pad_0000C8;
+ u32 b_pci_cfg; /* 0x0000CC */
+ u32 _pad_0000D0;
+ u32 b_pci_err_upper; /* 0x0000D4 */
+ u32 _pad_0000D8;
+ u32 b_pci_err_lower; /* 0x0000DC */
+ u32 _pad_0000E0[8];
#define b_gio_err_lower b_pci_err_lower
#define b_gio_err_upper b_pci_err_upper
/* Interrupt 0x000100-0x0001FF */
- bridgereg_t _pad_000100;
- bridgereg_t b_int_status; /* 0x000104 */
- bridgereg_t _pad_000108;
- bridgereg_t b_int_enable; /* 0x00010C */
- bridgereg_t _pad_000110;
- bridgereg_t b_int_rst_stat; /* 0x000114 */
- bridgereg_t _pad_000118;
- bridgereg_t b_int_mode; /* 0x00011C */
- bridgereg_t _pad_000120;
- bridgereg_t b_int_device; /* 0x000124 */
- bridgereg_t _pad_000128;
- bridgereg_t b_int_host_err; /* 0x00012C */
+ u32 _pad_000100;
+ u32 b_int_status; /* 0x000104 */
+ u32 _pad_000108;
+ u32 b_int_enable; /* 0x00010C */
+ u32 _pad_000110;
+ u32 b_int_rst_stat; /* 0x000114 */
+ u32 _pad_000118;
+ u32 b_int_mode; /* 0x00011C */
+ u32 _pad_000120;
+ u32 b_int_device; /* 0x000124 */
+ u32 _pad_000128;
+ u32 b_int_host_err; /* 0x00012C */
struct {
- bridgereg_t __pad; /* 0x0001{30,,,68} */
- bridgereg_t addr; /* 0x0001{34,,,6C} */
+ u32 __pad; /* 0x0001{30,,,68} */
+ u32 addr; /* 0x0001{34,,,6C} */
} b_int_addr[8]; /* 0x000130 */
- bridgereg_t _pad_000170[36];
+ u32 _pad_000170[36];
/* Device 0x000200-0x0003FF */
struct {
- bridgereg_t __pad; /* 0x0002{00,,,38} */
- bridgereg_t reg; /* 0x0002{04,,,3C} */
+ u32 __pad; /* 0x0002{00,,,38} */
+ u32 reg; /* 0x0002{04,,,3C} */
} b_device[8]; /* 0x000200 */
struct {
- bridgereg_t __pad; /* 0x0002{40,,,78} */
- bridgereg_t reg; /* 0x0002{44,,,7C} */
+ u32 __pad; /* 0x0002{40,,,78} */
+ u32 reg; /* 0x0002{44,,,7C} */
} b_wr_req_buf[8]; /* 0x000240 */
struct {
- bridgereg_t __pad; /* 0x0002{80,,,88} */
- bridgereg_t reg; /* 0x0002{84,,,8C} */
+ u32 __pad; /* 0x0002{80,,,88} */
+ u32 reg; /* 0x0002{84,,,8C} */
} b_rrb_map[2]; /* 0x000280 */
#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
- bridgereg_t _pad_000290;
- bridgereg_t b_resp_status; /* 0x000294 */
- bridgereg_t _pad_000298;
- bridgereg_t b_resp_clear; /* 0x00029C */
+ u32 _pad_000290;
+ u32 b_resp_status; /* 0x000294 */
+ u32 _pad_000298;
+ u32 b_resp_clear; /* 0x00029C */
- bridgereg_t _pad_0002A0[24];
+ u32 _pad_0002A0[24];
char _pad_000300[0x10000 - 0x000300];
/* Internal Address Translation Entry RAM 0x010000-0x0103FF */
union {
- bridge_ate_t wr; /* write-only */
+ u64 wr; /* write-only */
struct {
- bridgereg_t _p_pad;
- bridgereg_t rd; /* read-only */
+ u32 _p_pad;
+ u32 rd; /* read-only */
} hi;
} b_int_ate_ram[128];
@@ -192,8 +195,8 @@ typedef volatile struct bridge_s {
/* Internal Address Translation Entry RAM LOW 0x011000-0x0113FF */
struct {
- bridgereg_t _p_pad;
- bridgereg_t rd; /* read-only */
+ u32 _p_pad;
+ u32 rd; /* read-only */
} b_int_ate_ram_lo[128];
char _pad_011400[0x20000 - 0x011400];
@@ -212,7 +215,7 @@ typedef volatile struct bridge_s {
} f[8];
} b_type0_cfg_dev[8]; /* 0x020000 */
- /* PCI Type 1 Configuration Space 0x028000-0x028FFF */
+ /* PCI Type 1 Configuration Space 0x028000-0x028FFF */
union { /* make all access sizes available. */
u8 c[0x1000 / 1];
u16 s[0x1000 / 2];
@@ -233,7 +236,7 @@ typedef volatile struct bridge_s {
u8 _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */
/* External Address Translation Entry RAM 0x080000-0x0FFFFF */
- bridge_ate_t b_ext_ate_ram[0x10000];
+ u64 b_ext_ate_ram[0x10000];
/* Reserved 0x100000-0x1FFFFF */
char _pad_100000[0x200000-0x100000];
@@ -259,13 +262,13 @@ typedef volatile struct bridge_s {
u32 l[0x400000 / 4]; /* read-only */
u64 d[0x400000 / 8]; /* read-only */
} b_external_flash; /* 0xC00000 */
-} bridge_t;
+};
/*
* Field formats for Error Command Word and Auxiliary Error Command Word
* of bridge.
*/
-typedef struct bridge_err_cmdword_s {
+struct bridge_err_cmdword {
union {
u32 cmd_word;
struct {
@@ -282,7 +285,7 @@ typedef struct bridge_err_cmdword_s {
rsvd:8;
} berr_st;
} berr_un;
-} bridge_err_cmdword_t;
+};
#define berr_field berr_un.berr_st
#endif /* !__ASSEMBLY__ */
@@ -290,7 +293,7 @@ typedef struct bridge_err_cmdword_s {
/*
* The values of these macros can and should be crosschecked
* regularly against the offsets of the like-named fields
- * within the "bridge_t" structure above.
+ * within the bridge_regs structure above.
*/
/* Byte offset macros for Bridge internal registers */
@@ -797,49 +800,14 @@ typedef struct bridge_err_cmdword_s {
#define PCI64_ATTR_RMF_MASK 0x00ff000000000000
#define PCI64_ATTR_RMF_SHFT 48
-#ifndef __ASSEMBLY__
-/* Address translation entry for mapped pci32 accesses */
-typedef union ate_u {
- u64 ent;
- struct ate_s {
- u64 rmf:16;
- u64 addr:36;
- u64 targ:4;
- u64 reserved:3;
- u64 barrier:1;
- u64 prefetch:1;
- u64 precise:1;
- u64 coherent:1;
- u64 valid:1;
- } field;
-} ate_t;
-#endif /* !__ASSEMBLY__ */
-
-#define ATE_V 0x01
-#define ATE_CO 0x02
-#define ATE_PREC 0x04
-#define ATE_PREF 0x08
-#define ATE_BAR 0x10
-
-#define ATE_PFNSHIFT 12
-#define ATE_TIDSHIFT 8
-#define ATE_RMFSHIFT 48
-
-#define mkate(xaddr, xid, attr) ((xaddr) & 0x0000fffffffff000ULL) | \
- ((xid)<<ATE_TIDSHIFT) | \
- (attr)
-
-#define BRIDGE_INTERNAL_ATES 128
-
struct bridge_controller {
struct pci_controller pc;
struct resource mem;
struct resource io;
struct resource busn;
- bridge_t *base;
+ struct bridge_regs *base;
nasid_t nasid;
unsigned int widget_id;
- unsigned int irq_cpu;
u64 baddr;
unsigned int pci_int[8];
};
@@ -847,8 +815,14 @@ struct bridge_controller {
#define BRIDGE_CONTROLLER(bus) \
((struct bridge_controller *)((bus)->sysdata))
-extern void register_bridge_irq(unsigned int irq);
-extern int request_bridge_irq(struct bridge_controller *bc);
+#define bridge_read(bc, reg) __raw_readl(&bc->base->reg)
+#define bridge_write(bc, reg, val) __raw_writel(val, &bc->base->reg)
+#define bridge_set(bc, reg, val) \
+ __raw_writel(__raw_readl(&bc->base->reg) | (val), &bc->base->reg)
+#define bridge_clr(bc, reg, val) \
+ __raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg)
+
+extern int request_bridge_irq(struct bridge_controller *bc, int pin);
extern struct pci_ops bridge_pci_ops;
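Usage sketch for the new accessors (b_int_enable and b_wid_tflush are real members of struct bridge_regs above; the function itself is illustrative):

static inline void example_unmask_bridge_pin(struct bridge_controller *bc,
					     int pin)
{
	bridge_set(bc, b_int_enable, 1 << pin);	/* set the per-pin enable bit */
	bridge_read(bc, b_wid_tflush);		/* read back to flush the posted write */
}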
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 910851c62db3..4ccb465ef3f2 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -17,6 +17,7 @@
#include <asm/pgtable-64.h>
#endif
+#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
@@ -204,51 +205,11 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
-#ifdef CONFIG_SMP
- /*
- * For SMP, multiple CPUs can race, so we need to do
- * this atomically.
- */
- unsigned long page_global = _PAGE_GLOBAL;
- unsigned long tmp;
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- __asm__ __volatile__ (
- " .set push \n"
- " .set arch=r4000 \n"
- " .set noreorder \n"
- "1:" __LL "%[tmp], %[buddy] \n"
- " bnez %[tmp], 2f \n"
- " or %[tmp], %[tmp], %[global] \n"
- __SC "%[tmp], %[buddy] \n"
- " beqzl %[tmp], 1b \n"
- " nop \n"
- "2: \n"
- " .set pop \n"
- : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
- : [global] "r" (page_global));
- } else if (kernel_uses_llsc) {
- loongson_llsc_mb();
- __asm__ __volatile__ (
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " .set noreorder \n"
- "1:" __LL "%[tmp], %[buddy] \n"
- " bnez %[tmp], 2f \n"
- " or %[tmp], %[tmp], %[global] \n"
- __SC "%[tmp], %[buddy] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- "2: \n"
- " .set pop \n"
- : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
- : [global] "r" (page_global));
- loongson_llsc_mb();
- }
-#else /* !CONFIG_SMP */
- if (pte_none(*buddy))
- pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
-#endif /* CONFIG_SMP */
+# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
+ cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
+# else
+ cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
+# endif
}
#endif
}
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index b7123f9c0785..65618ff1280c 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -29,6 +29,7 @@ struct plat_smp_ops {
int (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus);
+ void (*prepare_boot_cpu)(void);
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h
index 66814f8ba8e8..837d23e24976 100644
--- a/arch/mips/include/asm/sn/addrs.h
+++ b/arch/mips/include/asm/sn/addrs.h
@@ -27,16 +27,11 @@
#ifndef __ASSEMBLY__
-#define PS_UINT_CAST (unsigned long)
#define UINT64_CAST (unsigned long)
-#define HUBREG_CAST (volatile hubreg_t *)
-
#else /* __ASSEMBLY__ */
-#define PS_UINT_CAST
#define UINT64_CAST
-#define HUBREG_CAST
#endif /* __ASSEMBLY__ */
@@ -256,42 +251,23 @@
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
-#define LOCAL_HUB_ADDR(_x) (HUBREG_CAST (IALIAS_BASE + (_x)))
-#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
- 0x800000 + (_x)))
-#ifdef CONFIG_SGI_IP27
-#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
- 0x800000 + (_x)))
-#endif /* CONFIG_SGI_IP27 */
+#define LOCAL_HUB_ADDR(_x) (IALIAS_BASE + (_x))
+#define REMOTE_HUB_ADDR(_n, _x) ((NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x)))
#ifndef __ASSEMBLY__
-#define HUB_L(_a) *(_a)
-#define HUB_S(_a, _d) *(_a) = (_d)
+#define LOCAL_HUB_PTR(_x) ((u64 *)LOCAL_HUB_ADDR((_x)))
+#define REMOTE_HUB_PTR(_n, _x) ((u64 *)REMOTE_HUB_ADDR((_n), (_x)))
-#define LOCAL_HUB_L(_r) HUB_L(LOCAL_HUB_ADDR(_r))
-#define LOCAL_HUB_S(_r, _d) HUB_S(LOCAL_HUB_ADDR(_r), (_d))
-#define REMOTE_HUB_L(_n, _r) HUB_L(REMOTE_HUB_ADDR((_n), (_r)))
-#define REMOTE_HUB_S(_n, _r, _d) HUB_S(REMOTE_HUB_ADDR((_n), (_r)), (_d))
-#define REMOTE_HUB_PI_L(_n, _sn, _r) HUB_L(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)))
-#define REMOTE_HUB_PI_S(_n, _sn, _r, _d) HUB_S(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)), (_d))
+#define LOCAL_HUB_L(_r) __raw_readq(LOCAL_HUB_PTR(_r))
+#define LOCAL_HUB_S(_r, _d) __raw_writeq((_d), LOCAL_HUB_PTR(_r))
+#define REMOTE_HUB_L(_n, _r) __raw_readq(REMOTE_HUB_PTR((_n), (_r)))
+#define REMOTE_HUB_S(_n, _r, _d) __raw_writeq((_d), \
+ REMOTE_HUB_PTR((_n), (_r)))
#endif /* !__ASSEMBLY__ */
/*
- * The following macros are used to get to a hub/bridge register, given
- * the base of the register space.
- */
-#define HUB_REG_PTR(_base, _off) \
- (HUBREG_CAST((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
-
-#define HUB_REG_PTR_L(_base, _off) \
- HUB_L(HUB_REG_PTR((_base), (_off)))
-
-#define HUB_REG_PTR_S(_base, _off, _data) \
- HUB_S(HUB_REG_PTR((_base), (_off)), (_data))
-
-/*
* Software structure locations -- permanently fixed
* See diagram in kldir.h
*/
@@ -387,44 +363,14 @@
#define SYMMON_STK_END(nasid) (SYMMON_STK_ADDR(nasid, 0) + KLD_SYMMON_STK(nasid)->size)
-/* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a
- * relocatable program
- */
-#define UNIX_DEBUG_LOADADDR 0x300000
-#define SYMMON_LOADADDR(nasid) \
- TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000))
-
-#define FREEMEM_OFFSET(nasid) KLD_FREEMEM(nasid)->offset
-#define FREEMEM_ADDR(nasid) SYMMON_STK_END(nasid)
-/*
- * XXX
- * Fix this. FREEMEM_ADDR should be aware of if symmon is loaded.
- * Also, it should take into account what prom thinks to be a safe
- * address
- PHYS_TO_K0(NODE_OFFSET(nasid) + FREEMEM_OFFSET(nasid))
- */
-#define FREEMEM_SIZE(nasid) KLD_FREEMEM(nasid)->size
-
-#define PI_ERROR_OFFSET(nasid) KLD_PI_ERROR(nasid)->offset
-#define PI_ERROR_ADDR(nasid) \
- TO_NODE_UNCAC((nasid), PI_ERROR_OFFSET(nasid))
-#define PI_ERROR_SIZE(nasid) KLD_PI_ERROR(nasid)->size
-
#define NODE_OFFSET_TO_K0(_nasid, _off) \
PHYS_TO_K0((NODE_OFFSET(_nasid) + (_off)) | CAC_BASE)
#define NODE_OFFSET_TO_K1(_nasid, _off) \
TO_UNCAC((NODE_OFFSET(_nasid) + (_off)) | UNCAC_BASE)
-#define K0_TO_NODE_OFFSET(_k0addr) \
- ((__psunsigned_t)(_k0addr) & NODE_ADDRSPACE_MASK)
#define KERN_VARS_ADDR(nasid) KLD_KERN_VARS(nasid)->pointer
#define KERN_VARS_SIZE(nasid) KLD_KERN_VARS(nasid)->size
-#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer
-#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size
-
-#define GPDA_ADDR(nasid) TO_NODE_CAC(nasid, GPDA_OFFSET)
-
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h
index 471e6870d876..3f1fb1454749 100644
--- a/arch/mips/include/asm/sn/arch.h
+++ b/arch/mips/include/asm/sn/arch.h
@@ -17,8 +17,6 @@
#include <asm/sn/sn0/arch.h>
#endif
-typedef u64 hubreg_t;
-
#define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid)
#define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice)
#define makespnum(_nasid, _slice) \
diff --git a/arch/mips/include/asm/sn/io.h b/arch/mips/include/asm/sn/io.h
index d5174d04538c..211f1e83b523 100644
--- a/arch/mips/include/asm/sn/io.h
+++ b/arch/mips/include/asm/sn/io.h
@@ -44,7 +44,7 @@
IIO_ITTE_PUT((nasid), HUB_PIO_MAP_TO_MEM, \
(bigwin), IIO_ITTE_INVALID_WIDGET, 0)
-#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_ADDR((nasid), IIO_ITTE(bigwin))
+#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_PTR((nasid), IIO_ITTE(bigwin))
/*
* Macro which takes the widget number, and returns the
diff --git a/arch/mips/include/asm/sn/sn0/addrs.h b/arch/mips/include/asm/sn/sn0/addrs.h
index 6b53070f400f..f13df84edfdd 100644
--- a/arch/mips/include/asm/sn/sn0/addrs.h
+++ b/arch/mips/include/asm/sn/sn0/addrs.h
@@ -134,11 +134,6 @@
#define CALIAS_BASE CAC_BASE
-
-
-#define BRIDGE_REG_PTR(_base, _off) ((volatile bridgereg_t *) \
- ((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
-
#define SN0_WIDGET_BASE(_nasid, _wid) (NODE_SWIN_BASE((_nasid), (_wid)))
/* Turn on sable logging for the processors whose bits are set. */
diff --git a/arch/mips/include/asm/tlbflush.h b/arch/mips/include/asm/tlbflush.h
index 40a361092491..9789e7a32def 100644
--- a/arch/mips/include/asm/tlbflush.h
+++ b/arch/mips/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*/
extern void local_flush_tlb_all(void);
-extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void local_flush_tlb_kernel_range(unsigned long start,
@@ -23,6 +22,8 @@ extern void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long page);
extern void local_flush_tlb_one(unsigned long vaddr);
+#include <asm/mmu_context.h>
+
#ifdef CONFIG_SMP
extern void flush_tlb_all(void);
@@ -36,7 +37,7 @@ extern void flush_tlb_one(unsigned long vaddr);
#else /* CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_mm(mm) drop_mmu_context(mm)
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
#define flush_tlb_kernel_range(vmaddr,end) \
local_flush_tlb_kernel_range(vmaddr, end)