-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/mips/Kconfig | 77
-rw-r--r--  arch/mips/Makefile | 3
-rw-r--r--  arch/mips/cpu/start.S | 31
-rw-r--r--  arch/mips/cpu/time.c | 2
-rw-r--r--  arch/mips/dts/Makefile | 1
-rw-r--r--  arch/mips/dts/mrvl,cn73xx.dtsi | 64
-rw-r--r--  arch/mips/dts/mrvl,octeon-ebb7304.dts | 96
-rw-r--r--  arch/mips/include/asm/addrspace.h | 27
-rw-r--r--  arch/mips/include/asm/asm.h | 130
-rw-r--r--  arch/mips/include/asm/cm.h | 12
-rw-r--r--  arch/mips/include/asm/compiler.h | 69
-rw-r--r--  arch/mips/include/asm/isa-rev.h | 24
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 1201
-rw-r--r--  arch/mips/include/asm/relocs.h | 2
-rw-r--r--  arch/mips/lib/bootm.c | 4
-rw-r--r--  arch/mips/lib/cache.c | 6
-rw-r--r--  arch/mips/lib/cache_init.S | 38
-rw-r--r--  arch/mips/lib/reloc.c | 7
-rw-r--r--  arch/mips/lib/traps.c | 4
-rw-r--r--  arch/mips/mach-octeon/Kconfig | 60
-rw-r--r--  arch/mips/mach-octeon/Makefile | 10
-rw-r--r--  arch/mips/mach-octeon/cache.c | 24
-rw-r--r--  arch/mips/mach-octeon/clock.c | 14
-rw-r--r--  arch/mips/mach-octeon/cpu.c | 66
-rw-r--r--  arch/mips/mach-octeon/dram.c | 28
-rw-r--r--  arch/mips/mach-octeon/include/ioremap.h | 30
-rw-r--r--  arch/mips/mach-octeon/include/mach/cavm-reg.h | 17
-rw-r--r--  arch/mips/mach-octeon/include/mach/clock.h | 12
-rw-r--r--  arch/mips/mach-octeon/lowlevel_init.S | 69
-rw-r--r--  arch/x86/Kconfig | 7
-rw-r--r--  arch/x86/cpu/Makefile | 2
-rw-r--r--  arch/x86/cpu/apollolake/Kconfig | 1
-rw-r--r--  arch/x86/cpu/cpu.c | 58
-rw-r--r--  arch/x86/cpu/i386/cpu.c | 26
-rw-r--r--  arch/x86/cpu/mp_init.c | 528
-rw-r--r--  arch/x86/cpu/mtrr.c | 149
-rw-r--r--  arch/x86/include/asm/mp.h | 137
-rw-r--r--  arch/x86/include/asm/mtrr.h | 51
-rw-r--r--  board/Marvell/octeon_ebb7304/Kconfig | 19
-rw-r--r--  board/Marvell/octeon_ebb7304/MAINTAINERS | 7
-rw-r--r--  board/Marvell/octeon_ebb7304/Makefile | 8
-rw-r--r--  board/Marvell/octeon_ebb7304/board.c | 9
-rw-r--r--  cmd/x86/mtrr.c | 148
-rw-r--r--  configs/octeon_ebb7304_defconfig | 38
-rw-r--r--  doc/board/google/chromebook_coral.rst | 1
-rw-r--r--  drivers/core/acpi.c | 2
-rw-r--r--  drivers/mtd/nand/spi/micron.c | 150
-rw-r--r--  drivers/sysreset/Kconfig | 7
-rw-r--r--  drivers/sysreset/Makefile | 1
-rw-r--r--  drivers/sysreset/sysreset_octeon.c | 52
-rw-r--r--  include/asm-generic/global_data.h | 1
-rw-r--r--  include/configs/octeon_common.h | 19
-rw-r--r--  include/configs/octeon_ebb7304.h | 20
-rw-r--r--  include/linux/mtd/spinand.h | 1
-rw-r--r--  scripts/config_whitelist.txt | 1
56 files changed, 2999 insertions, 579 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 2a281a9a0f..6316c6ca00 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -770,6 +770,13 @@ M: Ezequiel Garcia <ezequiel@collabora.com>
S: Maintained
F: arch/mips/mach-jz47xx/
+MIPS Octeon
+M: Aaron Williams <awilliams@marvell.com>
+S: Maintained
+F: arch/mips/mach-octeon/
+F: arch/mips/include/asm/arch-octeon/
+F: arch/mips/dts/mrvl,cn73xx.dtsi
+
MMC
M: Peng Fan <peng.fan@nxp.com>
S: Maintained
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 48e754cc46..997e145450 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -106,6 +106,26 @@ config ARCH_JZ47XX
select OF_CONTROL
select DM
+config ARCH_OCTEON
+ bool "Support Marvell Octeon CN7xxx platforms"
+ select CPU_CAVIUM_OCTEON
+ select DISPLAY_CPUINFO
+ select DMA_ADDR_T_64BIT
+ select DM
+ select DM_SERIAL
+ select DM_GPIO
+ select DM_ETH
+ select MIPS_L2_CACHE
+ select MIPS_MACH_EARLY_INIT
+ select MIPS_TUNE_OCTEON3
+ select ROM_EXCEPTION_VECTORS
+ select SUPPORTS_BIG_ENDIAN
+ select SUPPORTS_CPU_MIPS64_OCTEON
+ select PHYS_64BIT
+ select OF_CONTROL
+ select OF_LIVE
+ imply CMD_DM
+
config MACH_PIC32
bool "Support Microchip PIC32"
select DM
@@ -160,6 +180,7 @@ source "arch/mips/mach-bmips/Kconfig"
source "arch/mips/mach-jz47xx/Kconfig"
source "arch/mips/mach-pic32/Kconfig"
source "arch/mips/mach-mtmips/Kconfig"
+source "arch/mips/mach-octeon/Kconfig"
if MIPS
@@ -233,6 +254,14 @@ config CPU_MIPS64_R6
Choose this option to build a kernel for release 6 or later of the
MIPS64 architecture.
+config CPU_MIPS64_OCTEON
+ bool "Marvell Octeon series of CPUs"
+ depends on SUPPORTS_CPU_MIPS64_OCTEON
+ select 64BIT
+ help
+ Choose this option for Marvell Octeon CPUs. These CPUs implement an
+ ISA that sits between MIPS64 R5 and R6, with additional extensions.
+
endchoice
menu "General setup"
@@ -270,6 +299,39 @@ config MIPS_CACHE_INDEX_BASE
Normally this is CKSEG0. If the MIPS system needs to move this block
to some SRAM or ScratchPad RAM, adapt this option accordingly.
+config MIPS_MACH_EARLY_INIT
+ bool "Enable mach specific very early init code"
+ help
+ Use this to enable the call to mips_mach_early_init() very early
+ from start.S. This function can be used e.g. to do some very early
+ CPU / SoC initialization or image copying. It is called very early,
+ and at this stage the PC might not match the linking address
+ (CONFIG_TEXT_BASE) - no absolute jump is done until this call.
+
+config MIPS_CACHE_SETUP
+ bool "Allow generic start code to initialize and setup caches"
+ default n if SKIP_LOWLEVEL_INIT
+ default y
+ help
+ This allows the generic start code to invoke the generic initialization
+ of the CPU caches. Disabling this can be useful for RAM boot scenarios
+ (EJTAG, SPL payload) or for machines which don't need cache initialization
+ or which want to provide their own cache implementation.
+
+ If unsure, say yes.
+
+config MIPS_CACHE_DISABLE
+ bool "Allow generic start code to initially disable caches"
+ default n if SKIP_LOWLEVEL_INIT
+ default y
+ help
+ This allows the generic start code to initially disable the CPU caches
+ and run uncached until the caches are initialized and enabled. Disabling
+ this can be useful on machines which don't need cache initialization or
+ which want to provide their own cache implementation.
+
+ If unsure, say yes.
+
config MIPS_RELOCATION_TABLE_SIZE
hex "Relocation table size"
range 0x100 0x10000
@@ -398,6 +460,12 @@ config SUPPORTS_CPU_MIPS64_R2
config SUPPORTS_CPU_MIPS64_R6
bool
+config SUPPORTS_CPU_MIPS64_OCTEON
+ bool
+
+config CPU_CAVIUM_OCTEON
+ bool
+
config CPU_MIPS32
bool
default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
@@ -405,6 +473,7 @@ config CPU_MIPS32
config CPU_MIPS64
bool
default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
+ default y if CPU_MIPS64_OCTEON
config MIPS_TUNE_4KC
bool
@@ -421,6 +490,9 @@ config MIPS_TUNE_34KC
config MIPS_TUNE_74KC
bool
+config MIPS_TUNE_OCTEON3
+ bool
+
config 32BIT
bool
@@ -453,6 +525,11 @@ config MIPS_SRAM_INIT
before it can be used. If enabled, a function mips_sram_init() will
be called just before setup_stack_gd.
+config DMA_ADDR_T_64BIT
+ bool
+ help
+ Select this to enable 64-bit DMA addressing.
+
config SYS_DCACHE_SIZE
int
default 0
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index af3f227436..6502aebd29 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -17,6 +17,7 @@ machine-$(CONFIG_ARCH_JZ47XX) += jz47xx
machine-$(CONFIG_MACH_PIC32) += pic32
machine-$(CONFIG_ARCH_MTMIPS) += mtmips
machine-$(CONFIG_ARCH_MSCC) += mscc
+machine-${CONFIG_ARCH_OCTEON} += octeon
machdirs := $(patsubst %,arch/mips/mach-%/,$(machine-y))
libs-y += $(machdirs)
@@ -30,6 +31,7 @@ arch-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,-mips32r6
arch-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,-mips64
arch-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,-mips64r2
arch-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,-mips64r6
+arch-${CONFIG_CPU_MIPS64_OCTEON} += -march=octeon2
# Allow extra optimization for specific CPUs/SoCs
tune-$(CONFIG_MIPS_TUNE_4KC) += -mtune=4kc
@@ -37,6 +39,7 @@ tune-$(CONFIG_MIPS_TUNE_14KC) += -mtune=14kc
tune-$(CONFIG_MIPS_TUNE_24KC) += -mtune=24kc
tune-$(CONFIG_MIPS_TUNE_34KC) += -mtune=34kc
tune-$(CONFIG_MIPS_TUNE_74KC) += -mtune=74kc
+tune-${CONFIG_MIPS_TUNE_OCTEON3} += -mtune=octeon2
# Include default header files
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
diff --git a/arch/mips/cpu/start.S b/arch/mips/cpu/start.S
index 6de9a2f362..d0c412236d 100644
--- a/arch/mips/cpu/start.S
+++ b/arch/mips/cpu/start.S
@@ -17,19 +17,10 @@
#endif
#ifdef CONFIG_32BIT
-# define MIPS_RELOC 3
# define STATUS_SET 0
#endif
#ifdef CONFIG_64BIT
-# ifdef CONFIG_SYS_LITTLE_ENDIAN
-# define MIPS64_R_INFO(ssym, r_type3, r_type2, r_type) \
- (((r_type) << 24) | ((r_type2) << 16) | ((r_type3) << 8) | (ssym))
-# else
-# define MIPS64_R_INFO(ssym, r_type3, r_type2, r_type) \
- ((r_type) | ((r_type2) << 8) | ((r_type3) << 16) | (ssym) << 24)
-# endif
-# define MIPS_RELOC MIPS64_R_INFO(0x00, 0x00, 0x12, 0x03)
# define STATUS_SET ST0_KX
#endif
@@ -147,7 +138,7 @@ reset:
and t0, t0, (1 << 31)
#else
1: mfc0 t0, CP0_EBASE
- and t0, t0, EBASE_CPUNUM
+ and t0, t0, MIPS_EBASE_CPUNUM
#endif
/* Hang if this isn't the first CPU in the system */
@@ -204,12 +195,11 @@ wr_done:
/* Clear timer interrupt (CP0_COUNT cleared on branch to 'reset') */
mtc0 zero, CP0_COMPARE
-#ifndef CONFIG_SKIP_LOWLEVEL_INIT
- mfc0 t0, CP0_CONFIG
- and t0, t0, MIPS_CONF_IMPL
- or t0, t0, CONF_CM_UNCACHED
- mtc0 t0, CP0_CONFIG
- ehb
+#ifdef CONFIG_MIPS_CACHE_DISABLE
+ /* Disable caches */
+ PTR_LA t9, mips_cache_disable
+ jalr t9
+ nop
#endif
#ifdef CONFIG_MIPS_CM
@@ -244,12 +234,21 @@ wr_done:
jalr t9
nop
# endif
+#endif
+#ifdef CONFIG_MIPS_MACH_EARLY_INIT
+ bal mips_mach_early_init
+ nop
+#endif
+
+#ifdef CONFIG_MIPS_CACHE_SETUP
/* Initialize caches... */
PTR_LA t9, mips_cache_reset
jalr t9
nop
+#endif
+#ifndef CONFIG_SKIP_LOWLEVEL_INIT
# ifndef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
/* Initialize any external memory */
PTR_LA t9, lowlevel_init
diff --git a/arch/mips/cpu/time.c b/arch/mips/cpu/time.c
index e0c1868b8c..5e7a7144d0 100644
--- a/arch/mips/cpu/time.c
+++ b/arch/mips/cpu/time.c
@@ -13,7 +13,9 @@ unsigned long notrace timer_read_counter(void)
return read_c0_count();
}
+#if defined(CONFIG_SYS_MIPS_TIMER_FREQ)
ulong notrace __weak get_tbclk(void)
{
return CONFIG_SYS_MIPS_TIMER_FREQ;
}
+#endif
diff --git a/arch/mips/dts/Makefile b/arch/mips/dts/Makefile
index f711e9fb59..dc85901dca 100644
--- a/arch/mips/dts/Makefile
+++ b/arch/mips/dts/Makefile
@@ -18,6 +18,7 @@ dtb-$(CONFIG_BOARD_COMTREND_VR3032U) += comtrend,vr-3032u.dtb
dtb-$(CONFIG_BOARD_COMTREND_WAP5813N) += comtrend,wap-5813n.dtb
dtb-$(CONFIG_BOARD_HUAWEI_HG556A) += huawei,hg556a.dtb
dtb-$(CONFIG_BOARD_MT7628_RFB) += mediatek,mt7628-rfb.dtb
+dtb-$(CONFIG_TARGET_OCTEON_EBB7304) += mrvl,octeon-ebb7304.dtb
dtb-$(CONFIG_BOARD_NETGEAR_CG3100D) += netgear,cg3100d.dtb
dtb-$(CONFIG_BOARD_NETGEAR_DGND3700V2) += netgear,dgnd3700v2.dtb
dtb-$(CONFIG_BOARD_SAGEM_FAST1704) += sagem,f@st1704.dtb
diff --git a/arch/mips/dts/mrvl,cn73xx.dtsi b/arch/mips/dts/mrvl,cn73xx.dtsi
new file mode 100644
index 0000000000..a7bd55f8ad
--- /dev/null
+++ b/arch/mips/dts/mrvl,cn73xx.dtsi
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Marvell / Cavium Inc. CN73xx
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc0: soc@0 {
+ interrupt-parent = <&ciu3>;
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges; /* Direct mapping */
+
+ ciu3: interrupt-controller@1010000000000 {
+ compatible = "cavium,octeon-7890-ciu3";
+ interrupt-controller;
+ /*
+ * Interrupts are specified by two parts:
+ * 1) Source number (20 significant bits)
+ * 2) Trigger type: (4 == level, 1 == edge)
+ */
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x10100 0x00000000 0x0 0xb0000000>;
+ };
+
+ bootbus: bootbus@1180000000000 {
+ compatible = "cavium,octeon-3860-bootbus","simple-bus";
+ reg = <0x11800 0x00000000 0x0 0x200>;
+ /* The chip select number and offset */
+ #address-cells = <2>;
+ /* The size of the chip select region */
+ #size-cells = <1>;
+ };
+
+ reset: reset@1180006001600 {
+ compatible = "mrvl,cn7xxx-rst";
+ reg = <0x11800 0x06001600 0x0 0x200>;
+ };
+
+ uart0: serial@1180000000800 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000800 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <0x08000 4>;
+ };
+
+ uart1: serial@1180000000c00 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000c00 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <0x08040 4>;
+ };
+ };
+};
diff --git a/arch/mips/dts/mrvl,octeon-ebb7304.dts b/arch/mips/dts/mrvl,octeon-ebb7304.dts
new file mode 100644
index 0000000000..4e9c2de7d4
--- /dev/null
+++ b/arch/mips/dts/mrvl,octeon-ebb7304.dts
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Marvell / Cavium Inc. EVB CN7300
+ */
+
+/dts-v1/;
+
+/include/ "mrvl,cn73xx.dtsi"
+
+/ {
+ model = "cavium,ebb7304";
+ compatible = "cavium,ebb7304";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = &uart0;
+ };
+};
+
+&bootbus {
+ /*
+ * bootbus CS0 for CFI flash is remapped (0x1fc0.0000 -> 0x1f40.0000)
+ * as the initial size is too small for the 8MiB flash device
+ */
+ ranges = <0 0 0 0x1f400000 0xc00000>,
+ <1 0 0x10000 0x10000000 0>,
+ <2 0 0x10000 0x20000000 0>,
+ <3 0 0x10000 0x30000000 0>,
+ <4 0 0 0x1d020000 0x10000>,
+ <5 0 0x10000 0x50000000 0>,
+ <6 0 0x10000 0x60000000 0>,
+ <7 0 0x10000 0x70000000 0>;
+
+ cavium,cs-config@0 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <0>;
+ cavium,t-adr = <10>;
+ cavium,t-ce = <50>;
+ cavium,t-oe = <50>;
+ cavium,t-we = <35>;
+ cavium,t-rd-hld = <25>;
+ cavium,t-wr-hld = <35>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <50>;
+ cavium,t-page = <30>;
+ cavium,t-rd-dly = <0>;
+ cavium,page-mode = <1>;
+ cavium,pages = <8>;
+ cavium,bus-width = <8>;
+ };
+
+ cavium,cs-config@4 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <4>;
+ cavium,t-adr = <10>;
+ cavium,t-ce = <10>;
+ cavium,t-oe = <160>;
+ cavium,t-we = <100>;
+ cavium,t-rd-hld = <10>;
+ cavium,t-wr-hld = <0>;
+ cavium,t-pause = <50>;
+ cavium,t-wait = <50>;
+ cavium,t-page = <10>;
+ cavium,t-rd-dly = <10>;
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+
+ flash0: nor@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "bootloader";
+ reg = <0 0x340000>;
+ read-only;
+ };
+ partition@300000 {
+ label = "storage";
+ reg = <0x340000 0x4be000>;
+ };
+ partition@7fe000 {
+ label = "environment";
+ reg = <0x7fe000 0x2000>;
+ read-only;
+ };
+ };
+};
+
+&uart0 {
+ clock-frequency = <1200000000>;
+};
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index b6d387677e..8112ab833e 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -42,7 +42,7 @@
/*
* Returns the kernel segment base of a given address
*/
-#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000)
+#define KSEGX(a) ((_ACAST32_(a)) & _ACAST32_(0xe0000000))
/*
* Returns the physical address of a CKSEGx / XKPHYS address
@@ -123,21 +123,7 @@
#define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p))
#define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p))
#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK)
-#define PHYS_TO_XKPHYS(cm, a) (_CONST64_(0x8000000000000000) | \
- (_CONST64_(cm) << 59) | (a))
-
-/*
- * Returns the uncached address of a sdram address
- */
-#ifndef __ASSEMBLY__
-#if defined(CONFIG_TB0229)
-/* We use a 36 bit physical address map here and
- cannot access physical memory directly from core */
-#define UNCACHED_SDRAM(a) (((unsigned long)(a)) | 0x20000000)
-#else /* !CONFIG_TB0229 */
-#define UNCACHED_SDRAM(a) CKSEG1ADDR(a)
-#endif /* CONFIG_TB0229 */
-#endif /* __ASSEMBLY__ */
+#define PHYS_TO_XKPHYS(cm, a) (XKPHYS | (_ACAST64_(cm) << 59) | (a))
/*
* The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting
@@ -146,18 +132,9 @@
*/
#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */
-#ifndef CONFIG_CPU_R8000
-
-/*
- * The R8000 doesn't have the 32-bit compat spaces so we don't define them
- * in order to catch bugs in the source code.
- */
-
#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000)
#define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */
-#endif
-
#define KDM_TO_PHYS(x) (_ACAST64_ (x) & TO_PHYS_MASK)
#define PHYS_TO_K0(x) (_ACAST64_ (x) | CAC_BASE)
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 7abcf6df07..a6876e1b07 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -16,37 +16,12 @@
#include <asm/sgidefs.h>
-#ifndef CAT
-#ifdef __STDC__
-#define __CAT(str1, str2) str1##str2
-#else
-#define __CAT(str1, str2) str1/**/str2
-#endif
-#define CAT(str1, str2) __CAT(str1, str2)
-#endif
-
-/*
- * PIC specific declarations
- * Not used for the kernel but here seems to be the right place.
- */
-#ifdef __PIC__
-#define CPRESTORE(register) \
- .cprestore register
-#define CPADD(register) \
- .cpadd register
-#define CPLOAD(register) \
- .cpload register
-#else
-#define CPRESTORE(register)
-#define CPADD(register)
-#define CPLOAD(register)
-#endif
-
#define ENTRY(symbol) \
.globl symbol; \
.type symbol, @function; \
.ent symbol, 0; \
-symbol:
+symbol: .cfi_startproc; \
+ .insn
/*
* LEAF - declare leaf routine
@@ -57,7 +32,9 @@ symbol:
.type symbol, @function; \
.ent symbol, 0; \
.section .text.symbol, "x"; \
-symbol: .frame sp, 0, ra
+symbol: .frame sp, 0, ra; \
+ .cfi_startproc; \
+ .insn
/*
* NESTED - declare nested routine entry point
@@ -68,12 +45,15 @@ symbol: .frame sp, 0, ra
.type symbol, @function; \
.ent symbol, 0; \
.section .text.symbol, "x"; \
-symbol: .frame sp, framesize, rpc
+symbol: .frame sp, framesize, rpc; \
+ .cfi_startproc; \
+ .insn
/*
* END - mark end of function
*/
#define END(function) \
+ .cfi_endproc; \
.end function; \
.size function, .-function
@@ -90,7 +70,7 @@ symbol:
#define FEXPORT(symbol) \
.globl symbol; \
.type symbol, @function; \
-symbol:
+symbol: .insn
/*
* ABS - export absolute symbol
@@ -129,96 +109,6 @@ symbol = value
.popsection;
/*
- * Build text tables
- */
-#define TTABLE(string) \
- .pushsection .text; \
- .word 1f; \
- .popsection \
- .pushsection .data; \
-1: .asciiz string; \
- .popsection
-
-/*
- * MIPS IV pref instruction.
- * Use with .set noreorder only!
- *
- * MIPS IV implementations are free to treat this as a nop. The R5000
- * is one of them. So we should have an option not to use this instruction.
- */
-#ifdef CONFIG_CPU_HAS_PREFETCH
-
-#define PREF(hint, addr) \
- .set push; \
- .set arch=r5000; \
- pref hint, addr; \
- .set pop
-
-#define PREFE(hint, addr) \
- .set push; \
- .set mips0; \
- .set eva; \
- prefe hint, addr; \
- .set pop
-
-#define PREFX(hint, addr) \
- .set push; \
- .set arch=r5000; \
- prefx hint, addr; \
- .set pop
-
-#else /* !CONFIG_CPU_HAS_PREFETCH */
-
-#define PREF(hint, addr)
-#define PREFE(hint, addr)
-#define PREFX(hint, addr)
-
-#endif /* !CONFIG_CPU_HAS_PREFETCH */
-
-/*
- * MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
- */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
-#define MOVN(rd, rs, rt) \
- .set push; \
- .set reorder; \
- beqz rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#define MOVZ(rd, rs, rt) \
- .set push; \
- .set reorder; \
- bnez rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
-#define MOVN(rd, rs, rt) \
- .set push; \
- .set noreorder; \
- bnezl rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#define MOVZ(rd, rs, rt) \
- .set push; \
- .set noreorder; \
- beqzl rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
- (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
-#define MOVN(rd, rs, rt) \
- movn rd, rs, rt
-#define MOVZ(rd, rs, rt) \
- movz rd, rs, rt
-#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
-
-/*
* Stack alignment
*/
#if (_MIPS_SIM == _MIPS_SIM_ABI32)
diff --git a/arch/mips/include/asm/cm.h b/arch/mips/include/asm/cm.h
index 3878171607..99ddbccd80 100644
--- a/arch/mips/include/asm/cm.h
+++ b/arch/mips/include/asm/cm.h
@@ -41,6 +41,7 @@
#include <asm/io.h>
#include <linux/bitops.h>
+#if CONFIG_IS_ENABLED(MIPS_CM)
static inline void *mips_cm_base(void)
{
return (void *)CKSEG1ADDR(CONFIG_MIPS_CM_BASE);
@@ -56,6 +57,17 @@ static inline unsigned long mips_cm_l2_line_size(void)
line_sz &= GENMASK(GCR_L2_CONFIG_LINESZ_BITS - 1, 0);
return line_sz ? (2 << line_sz) : 0;
}
+#else
+static inline void *mips_cm_base(void)
+{
+ return NULL;
+}
+
+static inline unsigned long mips_cm_l2_line_size(void)
+{
+ return 0;
+}
+#endif
#endif /* !__ASSEMBLY__ */
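The fallback stubs above let callers drop their own CONFIG_MIPS_CM guards. A minimal sketch of the intended usage, not part of this commit (the helper name is made up):

	#include <asm/cm.h>

	/* Hypothetical caller: skip L2 maintenance when no CM/L2 is present. */
	static inline int hypothetical_l2_present(void)
	{
		/* mips_cm_l2_line_size() now simply returns 0 if MIPS_CM is disabled. */
		return mips_cm_l2_line_size() != 0;
	}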
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
new file mode 100644
index 0000000000..c498b42f1c
--- /dev/null
+++ b/arch/mips/include/asm/compiler.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2004, 2007 Maciej W. Rozycki
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef _ASM_COMPILER_H
+#define _ASM_COMPILER_H
+
+/*
+ * With GCC 4.5 onwards we can use __builtin_unreachable to indicate to the
+ * compiler that a particular code path will never be hit. This allows it to be
+ * optimised out of the generated binary.
+ *
+ * Unfortunately at least GCC 4.6.3 through 7.3.0 inclusive suffer from a bug
+ * that can lead to instructions from beyond an unreachable statement being
+ * incorrectly reordered into earlier delay slots if the unreachable statement
+ * is the only content of a case in a switch statement. This can lead to
+ * seemingly random behaviour, such as invalid memory accesses from incorrectly
+ * reordered loads or stores. See this potential GCC fix for details:
+ *
+ * https://gcc.gnu.org/ml/gcc-patches/2015-09/msg00360.html
+ *
+ * It is unclear whether GCC 8 onwards suffer from the same issue - nothing
+ * relevant is mentioned in GCC 8 release notes and nothing obviously relevant
+ * stands out in GCC commit logs, but these newer GCC versions generate very
+ * different code for the testcase which doesn't exhibit the bug.
+ *
+ * GCC also handles stack allocation suboptimally when calling noreturn
+ * functions or calling __builtin_unreachable():
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * We work around both of these issues by placing a volatile asm statement,
+ * which GCC is prevented from reordering past, prior to __builtin_unreachable
+ * calls.
+ *
+ * The .insn statement is required to ensure that any branches to the
+ * statement, which sadly must be kept due to the asm statement, are known to
+ * be branches to code and satisfy linker requirements for microMIPS kernels.
+ */
+#undef barrier_before_unreachable
+#define barrier_before_unreachable() asm volatile(".insn")
+
+#if !defined(CONFIG_CC_IS_GCC) || \
+ (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
+# define GCC_OFF_SMALL_ASM() "ZC"
+#elif defined(CONFIG_CPU_MICROMIPS)
+# error "microMIPS compilation unsupported with GCC older than 4.9"
+#else
+# define GCC_OFF_SMALL_ASM() "R"
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#endif /* _ASM_COMPILER_H */
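For reference only (not part of this commit), barrier_before_unreachable() is meant to be placed immediately in front of __builtin_unreachable(), e.g. in the switch-case situation the comment above describes; the decoder below is invented for illustration:

	#include <asm/compiler.h>

	/* Hypothetical decoder: the default case is known to be unreachable. */
	static int hypothetical_decode(int op)
	{
		switch (op) {
		case 0:
			return 1;
		case 1:
			return 2;
		default:
			/* Keep GCC from reordering instructions past this point. */
			barrier_before_unreachable();
			__builtin_unreachable();
		}
	}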
diff --git a/arch/mips/include/asm/isa-rev.h b/arch/mips/include/asm/isa-rev.h
new file mode 100644
index 0000000000..683ea3454d
--- /dev/null
+++ b/arch/mips/include/asm/isa-rev.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MIPS Tech, LLC
+ * Author: Matt Redfearn <matt.redfearn@mips.com>
+ */
+
+#ifndef __MIPS_ASM_ISA_REV_H__
+#define __MIPS_ASM_ISA_REV_H__
+
+/*
+ * The ISA revision level. This is 0 for MIPS I to V and N for
+ * MIPS{32,64}rN.
+ */
+
+/* If the compiler has defined __mips_isa_rev, believe it. */
+#ifdef __mips_isa_rev
+#define MIPS_ISA_REV __mips_isa_rev
+#else
+/* The compiler hasn't defined the isa rev so assume it's MIPS I - V (0) */
+#define MIPS_ISA_REV 0
+#endif
+
+
+#endif /* __MIPS_ASM_ISA_REV_H__ */
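For context, and again not part of this commit, MIPS_ISA_REV is meant for compile-time ISA checks such as the `if (MIPS_ISA_REV >= 2)` path that __write_64bit_c0_split gains later in this diff; a made-up example:

	#include <asm/isa-rev.h>

	/* Hypothetical feature test: lwl/lwr/swl/swr were removed in release 6. */
	static inline int hypothetical_has_lwl_lwr(void)
	{
		return MIPS_ISA_REV < 6;
	}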
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 7538e6b2e0..e65485b4ff 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -10,6 +10,8 @@
#ifndef _ASM_MIPSREGS_H
#define _ASM_MIPSREGS_H
+#include <asm/compiler.h>
+#include <asm/isa-rev.h>
/*
* The following macros are especially useful for __asm__
* inline assembler.
@@ -26,9 +28,10 @@
*/
#ifdef __ASSEMBLY__
#define _ULCAST_
+#define _U64CAST_
#else
-#include <linux/bitops.h>
#define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
#endif
/*
@@ -42,15 +45,25 @@
#define CP0_GLOBALNUMBER $3, 1
#define CP0_CONTEXT $4
#define CP0_PAGEMASK $5
+#define CP0_PAGEGRAIN $5, 1
+#define CP0_SEGCTL0 $5, 2
+#define CP0_SEGCTL1 $5, 3
+#define CP0_SEGCTL2 $5, 4
#define CP0_WIRED $6
#define CP0_INFO $7
-#define CP0_HWRENA $7, 0
+#define CP0_HWRENA $7
#define CP0_BADVADDR $8
#define CP0_BADINSTR $8, 1
#define CP0_COUNT $9
#define CP0_ENTRYHI $10
+#define CP0_GUESTCTL1 $10, 4
+#define CP0_GUESTCTL2 $10, 5
+#define CP0_GUESTCTL3 $10, 6
#define CP0_COMPARE $11
+#define CP0_GUESTCTL0EXT $11, 4
#define CP0_STATUS $12
+#define CP0_GUESTCTL0 $12, 6
+#define CP0_GTOFFSET $12, 7
#define CP0_CAUSE $13
#define CP0_EPC $14
#define CP0_PRID $15
@@ -59,6 +72,7 @@
#define CP0_CONFIG $16
#define CP0_CONFIG3 $16, 3
#define CP0_CONFIG5 $16, 5
+#define CP0_CONFIG6 $16, 6
#define CP0_LLADDR $17
#define CP0_WATCHLO $18
#define CP0_WATCHHI $19
@@ -131,6 +145,16 @@
#define MIPS_ENTRYLO_RI (_ULCAST_(1) << (BITS_PER_LONG - 1))
/*
+ * MIPSr6+ GlobalNumber register definitions
+ */
+#define MIPS_GLOBALNUMBER_VP_SHF 0
+#define MIPS_GLOBALNUMBER_VP (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_VP_SHF)
+#define MIPS_GLOBALNUMBER_CORE_SHF 8
+#define MIPS_GLOBALNUMBER_CORE (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_CORE_SHF)
+#define MIPS_GLOBALNUMBER_CLUSTER_SHF 16
+#define MIPS_GLOBALNUMBER_CLUSTER (_ULCAST_(0xf) << MIPS_GLOBALNUMBER_CLUSTER_SHF)
+
+/*
* Values for PageMask register
*/
#ifdef CONFIG_CPU_VR41XX
@@ -189,6 +213,8 @@
/* MIPS32/64 EntryHI bit definitions */
#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10)
+#define MIPS_ENTRYHI_ASIDX (_ULCAST_(0x3) << 8)
+#define MIPS_ENTRYHI_ASID (_ULCAST_(0xff) << 0)
/*
* R4x00 interrupt enable / cause bits
@@ -351,10 +377,14 @@
#define CAUSEF_IP7 (_ULCAST_(1) << 15)
#define CAUSEB_FDCI 21
#define CAUSEF_FDCI (_ULCAST_(1) << 21)
+#define CAUSEB_WP 22
+#define CAUSEF_WP (_ULCAST_(1) << 22)
#define CAUSEB_IV 23
#define CAUSEF_IV (_ULCAST_(1) << 23)
#define CAUSEB_PCI 26
#define CAUSEF_PCI (_ULCAST_(1) << 26)
+#define CAUSEB_DC 27
+#define CAUSEF_DC (_ULCAST_(1) << 27)
#define CAUSEB_CE 28
#define CAUSEF_CE (_ULCAST_(3) << 28)
#define CAUSEB_TI 30
@@ -363,9 +393,36 @@
#define CAUSEF_BD (_ULCAST_(1) << 31)
/*
- * Bits in the coprocessor 0 EBase register.
+ * Cause.ExcCode trap codes.
*/
-#define EBASE_CPUNUM 0x3ff
+#define EXCCODE_INT 0 /* Interrupt pending */
+#define EXCCODE_MOD 1 /* TLB modified fault */
+#define EXCCODE_TLBL 2 /* TLB miss on load or ifetch */
+#define EXCCODE_TLBS 3 /* TLB miss on a store */
+#define EXCCODE_ADEL 4 /* Address error on a load or ifetch */
+#define EXCCODE_ADES 5 /* Address error on a store */
+#define EXCCODE_IBE 6 /* Bus error on an ifetch */
+#define EXCCODE_DBE 7 /* Bus error on a load or store */
+#define EXCCODE_SYS 8 /* System call */
+#define EXCCODE_BP 9 /* Breakpoint */
+#define EXCCODE_RI 10 /* Reserved instruction exception */
+#define EXCCODE_CPU 11 /* Coprocessor unusable */
+#define EXCCODE_OV 12 /* Arithmetic overflow */
+#define EXCCODE_TR 13 /* Trap instruction */
+#define EXCCODE_MSAFPE 14 /* MSA floating point exception */
+#define EXCCODE_FPE 15 /* Floating point exception */
+#define EXCCODE_TLBRI 19 /* TLB Read-Inhibit exception */
+#define EXCCODE_TLBXI 20 /* TLB Execution-Inhibit exception */
+#define EXCCODE_MSADIS 21 /* MSA disabled exception */
+#define EXCCODE_MDMX 22 /* MDMX unusable exception */
+#define EXCCODE_WATCH 23 /* Watch address reference */
+#define EXCCODE_MCHECK 24 /* Machine check */
+#define EXCCODE_THREAD 25 /* Thread exceptions (MT) */
+#define EXCCODE_DSPDIS 26 /* DSP disabled exception */
+#define EXCCODE_GE 27 /* Virtualized guest exception (VZ) */
+
+/* Implementation specific trap codes used by MIPS cores */
+#define MIPS_EXCCODE_TLBPAR 16 /* TLB parity error exception */
/*
* Bits in the coprocessor 0 config register.
@@ -393,7 +450,7 @@
#define CONF_SM (_ULCAST_(1) << 16)
#define CONF_SC (_ULCAST_(1) << 17)
#define CONF_EW (_ULCAST_(3) << 18)
-#define CONF_EP (_ULCAST_(15) << 24)
+#define CONF_EP (_ULCAST_(15)<< 24)
#define CONF_EC (_ULCAST_(7) << 28)
#define CONF_CM (_ULCAST_(1) << 31)
@@ -419,7 +476,7 @@
#define R10K_CONF_CT (_ULCAST_(1) << 5)
#define R10K_CONF_PE (_ULCAST_(1) << 6)
#define R10K_CONF_PM (_ULCAST_(3) << 7)
-#define R10K_CONF_EC (_ULCAST_(15) << 9)
+#define R10K_CONF_EC (_ULCAST_(15)<< 9)
#define R10K_CONF_SB (_ULCAST_(1) << 13)
#define R10K_CONF_SK (_ULCAST_(1) << 14)
#define R10K_CONF_SS (_ULCAST_(7) << 16)
@@ -452,6 +509,7 @@
#define TX49_CONF_CWFON (_ULCAST_(1) << 27)
/* Bits specific to the MIPS32/64 PRA. */
+#define MIPS_CONF_VI (_ULCAST_(1) << 3)
#define MIPS_CONF_MT (_ULCAST_(7) << 7)
#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7)
#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7)
@@ -548,7 +606,8 @@
#define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14)
#define MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT (_ULCAST_(2) << 14)
#define MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT (_ULCAST_(3) << 14)
-#define MIPS_CONF4_KSCREXIST (_ULCAST_(255) << 16)
+#define MIPS_CONF4_KSCREXIST_SHIFT (16)
+#define MIPS_CONF4_KSCREXIST (_ULCAST_(255) << MIPS_CONF4_KSCREXIST_SHIFT)
#define MIPS_CONF4_VTLBSIZEEXT_SHIFT (24)
#define MIPS_CONF4_VTLBSIZEEXT (_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT)
#define MIPS_CONF4_AE (_ULCAST_(1) << 28)
@@ -561,9 +620,13 @@
#define MIPS_CONF5_LLB (_ULCAST_(1) << 4)
#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
#define MIPS_CONF5_VP (_ULCAST_(1) << 7)
+#define MIPS_CONF5_SBRI (_ULCAST_(1) << 6)
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
#define MIPS_CONF5_L2C (_ULCAST_(1) << 10)
+#define MIPS_CONF5_CA2 (_ULCAST_(1) << 14)
+#define MIPS_CONF5_MI (_ULCAST_(1) << 17)
+#define MIPS_CONF5_CRCP (_ULCAST_(1) << 18)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
@@ -572,6 +635,8 @@
#define MIPS_CONF6_SYND (_ULCAST_(1) << 13)
/* proAptiv FTLB on/off bit */
#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15)
+/* Loongson-3 FTLB on/off bit */
+#define MIPS_CONF6_FTLBDIS (_ULCAST_(1) << 22)
/* FTLB probability bits */
#define MIPS_CONF6_FTLBP_SHIFT (16)
@@ -581,19 +646,95 @@
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
-/* FTLB probability bits for R6 */
-#define MIPS_CONF7_FTLBP_SHIFT (18)
+
+/* Ingenic HPTLB off bits */
+#define XBURST_PAGECTRL_HPTLB_DIS 0xa9000000
+
+/* Ingenic Config7 bits */
+#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
+
+/* Config7 Bits specific to MIPS Technologies. */
+
+/* Performance counters implemented Per TC */
+#define MTI_CONF7_PTC (_ULCAST_(1) << 19)
+
+/* WatchLo* register definitions */
+#define MIPS_WATCHLO_IRW (_ULCAST_(0x7) << 0)
+
+/* WatchHi* register definitions */
+#define MIPS_WATCHHI_M (_ULCAST_(1) << 31)
+#define MIPS_WATCHHI_G (_ULCAST_(1) << 30)
+#define MIPS_WATCHHI_WM (_ULCAST_(0x3) << 28)
+#define MIPS_WATCHHI_WM_R_RVA (_ULCAST_(0) << 28)
+#define MIPS_WATCHHI_WM_R_GPA (_ULCAST_(1) << 28)
+#define MIPS_WATCHHI_WM_G_GVA (_ULCAST_(2) << 28)
+#define MIPS_WATCHHI_EAS (_ULCAST_(0x3) << 24)
+#define MIPS_WATCHHI_ASID (_ULCAST_(0xff) << 16)
+#define MIPS_WATCHHI_MASK (_ULCAST_(0x1ff) << 3)
+#define MIPS_WATCHHI_I (_ULCAST_(1) << 2)
+#define MIPS_WATCHHI_R (_ULCAST_(1) << 1)
+#define MIPS_WATCHHI_W (_ULCAST_(1) << 0)
+#define MIPS_WATCHHI_IRW (_ULCAST_(0x7) << 0)
+
+/* PerfCnt control register definitions */
+#define MIPS_PERFCTRL_EXL (_ULCAST_(1) << 0)
+#define MIPS_PERFCTRL_K (_ULCAST_(1) << 1)
+#define MIPS_PERFCTRL_S (_ULCAST_(1) << 2)
+#define MIPS_PERFCTRL_U (_ULCAST_(1) << 3)
+#define MIPS_PERFCTRL_IE (_ULCAST_(1) << 4)
+#define MIPS_PERFCTRL_EVENT_S 5
+#define MIPS_PERFCTRL_EVENT (_ULCAST_(0x3ff) << MIPS_PERFCTRL_EVENT_S)
+#define MIPS_PERFCTRL_PCTD (_ULCAST_(1) << 15)
+#define MIPS_PERFCTRL_EC (_ULCAST_(0x3) << 23)
+#define MIPS_PERFCTRL_EC_R (_ULCAST_(0) << 23)
+#define MIPS_PERFCTRL_EC_RI (_ULCAST_(1) << 23)
+#define MIPS_PERFCTRL_EC_G (_ULCAST_(2) << 23)
+#define MIPS_PERFCTRL_EC_GRI (_ULCAST_(3) << 23)
+#define MIPS_PERFCTRL_W (_ULCAST_(1) << 30)
+#define MIPS_PERFCTRL_M (_ULCAST_(1) << 31)
+
+/* PerfCnt control register MT extensions used by MIPS cores */
+#define MIPS_PERFCTRL_VPEID_S 16
+#define MIPS_PERFCTRL_VPEID (_ULCAST_(0xf) << MIPS_PERFCTRL_VPEID_S)
+#define MIPS_PERFCTRL_TCID_S 22
+#define MIPS_PERFCTRL_TCID (_ULCAST_(0xff) << MIPS_PERFCTRL_TCID_S)
+#define MIPS_PERFCTRL_MT_EN (_ULCAST_(0x3) << 20)
+#define MIPS_PERFCTRL_MT_EN_ALL (_ULCAST_(0) << 20)
+#define MIPS_PERFCTRL_MT_EN_VPE (_ULCAST_(1) << 20)
+#define MIPS_PERFCTRL_MT_EN_TC (_ULCAST_(2) << 20)
+
+/* PerfCnt control register MT extensions used by BMIPS5000 */
+#define BRCM_PERFCTRL_TC (_ULCAST_(1) << 30)
+
+/* PerfCnt control register MT extensions used by Netlogic XLR */
+#define XLR_PERFCTRL_ALLTHREADS (_ULCAST_(1) << 13)
/* MAAR bit definitions */
+#define MIPS_MAAR_VH (_U64CAST_(1) << 63)
#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
#define MIPS_MAAR_ADDR_SHIFT 12
#define MIPS_MAAR_S (_ULCAST_(1) << 1)
-#define MIPS_MAAR_V (_ULCAST_(1) << 0)
+#define MIPS_MAAR_VL (_ULCAST_(1) << 0)
+
+/* MAARI bit definitions */
+#define MIPS_MAARI_INDEX (_ULCAST_(0x3f) << 0)
+
+/* EBase bit definitions */
+#define MIPS_EBASE_CPUNUM_SHIFT 0
+#define MIPS_EBASE_CPUNUM (_ULCAST_(0x3ff) << 0)
+#define MIPS_EBASE_WG_SHIFT 11
+#define MIPS_EBASE_WG (_ULCAST_(1) << 11)
+#define MIPS_EBASE_BASE_SHIFT 12
+#define MIPS_EBASE_BASE (~_ULCAST_((1 << MIPS_EBASE_BASE_SHIFT) - 1))
/* CMGCRBase bit definitions */
#define MIPS_CMGCRB_BASE 11
#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
+/* LLAddr bit definitions */
+#define MIPS_LLADDR_LLB_SHIFT 0
+#define MIPS_LLADDR_LLB (_ULCAST_(1) << MIPS_LLADDR_LLB_SHIFT)
+
/*
* Bits in the MIPS32 Memory Segmentation registers.
*/
@@ -625,6 +766,8 @@
#define MIPS_PWFIELD_PTEI_SHIFT 0
#define MIPS_PWFIELD_PTEI_MASK 0x0000003f
+#define MIPS_PWSIZE_PS_SHIFT 30
+#define MIPS_PWSIZE_PS_MASK 0x40000000
#define MIPS_PWSIZE_GDW_SHIFT 24
#define MIPS_PWSIZE_GDW_MASK 0x3f000000
#define MIPS_PWSIZE_UDW_SHIFT 18
@@ -638,6 +781,12 @@
#define MIPS_PWCTL_PWEN_SHIFT 31
#define MIPS_PWCTL_PWEN_MASK 0x80000000
+#define MIPS_PWCTL_XK_SHIFT 28
+#define MIPS_PWCTL_XK_MASK 0x10000000
+#define MIPS_PWCTL_XS_SHIFT 27
+#define MIPS_PWCTL_XS_MASK 0x08000000
+#define MIPS_PWCTL_XU_SHIFT 26
+#define MIPS_PWCTL_XU_MASK 0x04000000
#define MIPS_PWCTL_DPH_SHIFT 7
#define MIPS_PWCTL_DPH_MASK 0x00000080
#define MIPS_PWCTL_HUGEPG_SHIFT 6
@@ -645,6 +794,94 @@
#define MIPS_PWCTL_PSN_SHIFT 0
#define MIPS_PWCTL_PSN_MASK 0x0000003f
+/* GuestCtl0 fields */
+#define MIPS_GCTL0_GM_SHIFT 31
+#define MIPS_GCTL0_GM (_ULCAST_(1) << MIPS_GCTL0_GM_SHIFT)
+#define MIPS_GCTL0_RI_SHIFT 30
+#define MIPS_GCTL0_RI (_ULCAST_(1) << MIPS_GCTL0_RI_SHIFT)
+#define MIPS_GCTL0_MC_SHIFT 29
+#define MIPS_GCTL0_MC (_ULCAST_(1) << MIPS_GCTL0_MC_SHIFT)
+#define MIPS_GCTL0_CP0_SHIFT 28
+#define MIPS_GCTL0_CP0 (_ULCAST_(1) << MIPS_GCTL0_CP0_SHIFT)
+#define MIPS_GCTL0_AT_SHIFT 26
+#define MIPS_GCTL0_AT (_ULCAST_(0x3) << MIPS_GCTL0_AT_SHIFT)
+#define MIPS_GCTL0_GT_SHIFT 25
+#define MIPS_GCTL0_GT (_ULCAST_(1) << MIPS_GCTL0_GT_SHIFT)
+#define MIPS_GCTL0_CG_SHIFT 24
+#define MIPS_GCTL0_CG (_ULCAST_(1) << MIPS_GCTL0_CG_SHIFT)
+#define MIPS_GCTL0_CF_SHIFT 23
+#define MIPS_GCTL0_CF (_ULCAST_(1) << MIPS_GCTL0_CF_SHIFT)
+#define MIPS_GCTL0_G1_SHIFT 22
+#define MIPS_GCTL0_G1 (_ULCAST_(1) << MIPS_GCTL0_G1_SHIFT)
+#define MIPS_GCTL0_G0E_SHIFT 19
+#define MIPS_GCTL0_G0E (_ULCAST_(1) << MIPS_GCTL0_G0E_SHIFT)
+#define MIPS_GCTL0_PT_SHIFT 18
+#define MIPS_GCTL0_PT (_ULCAST_(1) << MIPS_GCTL0_PT_SHIFT)
+#define MIPS_GCTL0_RAD_SHIFT 9
+#define MIPS_GCTL0_RAD (_ULCAST_(1) << MIPS_GCTL0_RAD_SHIFT)
+#define MIPS_GCTL0_DRG_SHIFT 8
+#define MIPS_GCTL0_DRG (_ULCAST_(1) << MIPS_GCTL0_DRG_SHIFT)
+#define MIPS_GCTL0_G2_SHIFT 7
+#define MIPS_GCTL0_G2 (_ULCAST_(1) << MIPS_GCTL0_G2_SHIFT)
+#define MIPS_GCTL0_GEXC_SHIFT 2
+#define MIPS_GCTL0_GEXC (_ULCAST_(0x1f) << MIPS_GCTL0_GEXC_SHIFT)
+#define MIPS_GCTL0_SFC2_SHIFT 1
+#define MIPS_GCTL0_SFC2 (_ULCAST_(1) << MIPS_GCTL0_SFC2_SHIFT)
+#define MIPS_GCTL0_SFC1_SHIFT 0
+#define MIPS_GCTL0_SFC1 (_ULCAST_(1) << MIPS_GCTL0_SFC1_SHIFT)
+
+/* GuestCtl0.AT Guest address translation control */
+#define MIPS_GCTL0_AT_ROOT 1 /* Guest MMU under Root control */
+#define MIPS_GCTL0_AT_GUEST 3 /* Guest MMU under Guest control */
+
+/* GuestCtl0.GExcCode Hypervisor exception cause codes */
+#define MIPS_GCTL0_GEXC_GPSI 0 /* Guest Privileged Sensitive Instruction */
+#define MIPS_GCTL0_GEXC_GSFC 1 /* Guest Software Field Change */
+#define MIPS_GCTL0_GEXC_HC 2 /* Hypercall */
+#define MIPS_GCTL0_GEXC_GRR 3 /* Guest Reserved Instruction Redirect */
+#define MIPS_GCTL0_GEXC_GVA 8 /* Guest Virtual Address available */
+#define MIPS_GCTL0_GEXC_GHFC 9 /* Guest Hardware Field Change */
+#define MIPS_GCTL0_GEXC_GPA 10 /* Guest Physical Address available */
+
+/* GuestCtl0Ext fields */
+#define MIPS_GCTL0EXT_RPW_SHIFT 8
+#define MIPS_GCTL0EXT_RPW (_ULCAST_(0x3) << MIPS_GCTL0EXT_RPW_SHIFT)
+#define MIPS_GCTL0EXT_NCC_SHIFT 6
+#define MIPS_GCTL0EXT_NCC (_ULCAST_(0x3) << MIPS_GCTL0EXT_NCC_SHIFT)
+#define MIPS_GCTL0EXT_CGI_SHIFT 4
+#define MIPS_GCTL0EXT_CGI (_ULCAST_(1) << MIPS_GCTL0EXT_CGI_SHIFT)
+#define MIPS_GCTL0EXT_FCD_SHIFT 3
+#define MIPS_GCTL0EXT_FCD (_ULCAST_(1) << MIPS_GCTL0EXT_FCD_SHIFT)
+#define MIPS_GCTL0EXT_OG_SHIFT 2
+#define MIPS_GCTL0EXT_OG (_ULCAST_(1) << MIPS_GCTL0EXT_OG_SHIFT)
+#define MIPS_GCTL0EXT_BG_SHIFT 1
+#define MIPS_GCTL0EXT_BG (_ULCAST_(1) << MIPS_GCTL0EXT_BG_SHIFT)
+#define MIPS_GCTL0EXT_MG_SHIFT 0
+#define MIPS_GCTL0EXT_MG (_ULCAST_(1) << MIPS_GCTL0EXT_MG_SHIFT)
+
+/* GuestCtl0Ext.RPW Root page walk configuration */
+#define MIPS_GCTL0EXT_RPW_BOTH 0 /* Root PW for GPA->RPA and RVA->RPA */
+#define MIPS_GCTL0EXT_RPW_GPA 2 /* Root PW for GPA->RPA */
+#define MIPS_GCTL0EXT_RPW_RVA 3 /* Root PW for RVA->RPA */
+
+/* GuestCtl0Ext.NCC Nested cache coherency attributes */
+#define MIPS_GCTL0EXT_NCC_IND 0 /* Guest CCA independent of Root CCA */
+#define MIPS_GCTL0EXT_NCC_MOD 1 /* Guest CCA modified by Root CCA */
+
+/* GuestCtl1 fields */
+#define MIPS_GCTL1_ID_SHIFT 0
+#define MIPS_GCTL1_ID_WIDTH 8
+#define MIPS_GCTL1_ID (_ULCAST_(0xff) << MIPS_GCTL1_ID_SHIFT)
+#define MIPS_GCTL1_RID_SHIFT 16
+#define MIPS_GCTL1_RID_WIDTH 8
+#define MIPS_GCTL1_RID (_ULCAST_(0xff) << MIPS_GCTL1_RID_SHIFT)
+#define MIPS_GCTL1_EID_SHIFT 24
+#define MIPS_GCTL1_EID_WIDTH 8
+#define MIPS_GCTL1_EID (_ULCAST_(0xff) << MIPS_GCTL1_EID_SHIFT)
+
+/* GuestID reserved for root context */
+#define MIPS_GCTL1_ROOT_GUESTID 0
+
/* CDMMBase register bit definitions */
#define MIPS_CDMMBASE_SIZE_SHIFT 0
#define MIPS_CDMMBASE_SIZE (_ULCAST_(511) << MIPS_CDMMBASE_SIZE_SHIFT)
@@ -653,6 +890,24 @@
#define MIPS_CDMMBASE_ADDR_SHIFT 11
#define MIPS_CDMMBASE_ADDR_START 15
+/* RDHWR register numbers */
+#define MIPS_HWR_CPUNUM 0 /* CPU number */
+#define MIPS_HWR_SYNCISTEP 1 /* SYNCI step size */
+#define MIPS_HWR_CC 2 /* Cycle counter */
+#define MIPS_HWR_CCRES 3 /* Cycle counter resolution */
+#define MIPS_HWR_ULR 29 /* UserLocal */
+#define MIPS_HWR_IMPL1 30 /* Implementation dependent */
+#define MIPS_HWR_IMPL2 31 /* Implementation dependent */
+
+/* Bits in HWREna register */
+#define MIPS_HWRENA_CPUNUM (_ULCAST_(1) << MIPS_HWR_CPUNUM)
+#define MIPS_HWRENA_SYNCISTEP (_ULCAST_(1) << MIPS_HWR_SYNCISTEP)
+#define MIPS_HWRENA_CC (_ULCAST_(1) << MIPS_HWR_CC)
+#define MIPS_HWRENA_CCRES (_ULCAST_(1) << MIPS_HWR_CCRES)
+#define MIPS_HWRENA_ULR (_ULCAST_(1) << MIPS_HWR_ULR)
+#define MIPS_HWRENA_IMPL1 (_ULCAST_(1) << MIPS_HWR_IMPL1)
+#define MIPS_HWRENA_IMPL2 (_ULCAST_(1) << MIPS_HWR_IMPL2)
+
/*
* Bitfields in the TX39 family CP0 Configuration Register 3
*/
@@ -696,6 +951,31 @@
/* Disable Branch Return Cache */
#define R10K_DIAG_D_BRC (_ULCAST_(1) << 22)
+/* Flush ITLB */
+#define LOONGSON_DIAG_ITLB (_ULCAST_(1) << 2)
+/* Flush DTLB */
+#define LOONGSON_DIAG_DTLB (_ULCAST_(1) << 3)
+/* Flush VTLB */
+#define LOONGSON_DIAG_VTLB (_ULCAST_(1) << 12)
+/* Flush FTLB */
+#define LOONGSON_DIAG_FTLB (_ULCAST_(1) << 13)
+
+/* CvmCtl register field definitions */
+#define CVMCTL_IPPCI_SHIFT 7
+#define CVMCTL_IPPCI (_U64CAST_(0x7) << CVMCTL_IPPCI_SHIFT)
+#define CVMCTL_IPTI_SHIFT 4
+#define CVMCTL_IPTI (_U64CAST_(0x7) << CVMCTL_IPTI_SHIFT)
+
+/* CvmMemCtl2 register field definitions */
+#define CVMMEMCTL2_INHIBITTS (_U64CAST_(1) << 17)
+
+/* CvmVMConfig register field definitions */
+#define CVMVMCONF_DGHT (_U64CAST_(1) << 60)
+#define CVMVMCONF_MMUSIZEM1_S 12
+#define CVMVMCONF_MMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_MMUSIZEM1_S)
+#define CVMVMCONF_RMMUSIZEM1_S 0
+#define CVMVMCONF_RMMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)
+
/*
* Coprocessor 1 (FPU) register names
*/
@@ -779,9 +1059,12 @@
/*
* Bits 22:20 of the FPU Status Register will be read as 0,
* and should be written as zero.
+ * MAC2008 was removed in Release 5 so we still treat it as
+ * reserved.
*/
#define FPU_CSR_RSVD (_ULCAST_(7) << 20)
+#define FPU_CSR_MAC2008 (_ULCAST_(1) << 20)
#define FPU_CSR_ABS2008 (_ULCAST_(1) << 19)
#define FPU_CSR_NAN2008 (_ULCAST_(1) << 18)
@@ -826,14 +1109,14 @@
* Macros for handling the ISA mode bit for MIPS16 and microMIPS.
*/
#if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \
- defined(CONFIG_SYS_SUPPORTS_MICROMIPS)
+ defined(CONFIG_SYS_SUPPORTS_MICROMIPS)
#define get_isa16_mode(x) ((x) & 0x1)
#define msk_isa16_mode(x) ((x) & ~0x1)
-#define set_isa16_mode(x) do { (x) |= 0x1; } while (0)
+#define set_isa16_mode(x) do { (x) |= 0x1; } while(0)
#else
#define get_isa16_mode(x) 0
#define msk_isa16_mode(x) (x)
-#define set_isa16_mode(x) do { } while (0)
+#define set_isa16_mode(x) do { } while(0)
#endif
/*
@@ -848,6 +1131,123 @@ static inline int mm_insn_16bit(u16 insn)
}
/*
+ * Helper macros for generating raw instruction encodings in inline asm.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+#define _ASM_INSN16_IF_MM(_enc) \
+ ".insn\n\t" \
+ ".hword (" #_enc ")\n\t"
+#define _ASM_INSN32_IF_MM(_enc) \
+ ".insn\n\t" \
+ ".hword ((" #_enc ") >> 16)\n\t" \
+ ".hword ((" #_enc ") & 0xffff)\n\t"
+#else
+#define _ASM_INSN_IF_MIPS(_enc) \
+ ".insn\n\t" \
+ ".word (" #_enc ")\n\t"
+#endif
+
+#ifndef _ASM_INSN16_IF_MM
+#define _ASM_INSN16_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN32_IF_MM
+#define _ASM_INSN32_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN_IF_MIPS
+#define _ASM_INSN_IF_MIPS(_enc)
+#endif
+
+/*
+ * parse_r var, r - Helper assembler macro for parsing register names.
+ *
+ * This converts the register name in $n form provided in \r to the
+ * corresponding register number, which is assigned to the variable \var. It is
+ * needed to allow explicit encoding of instructions in inline assembly where
+ * registers are chosen by the compiler in $n form, allowing us to avoid using
+ * fixed register numbers.
+ *
+ * It also allows newer instructions (not implemented by the assembler) to be
+ * transparently implemented using assembler macros, instead of needing separate
+ * cases depending on toolchain support.
+ *
+ * Simple usage example:
+ * __asm__ __volatile__("parse_r __rt, %0\n\t"
+ * ".insn\n\t"
+ * "# di %0\n\t"
+ * ".word (0x41606000 | (__rt << 16))"
+ * : "=r" (status);
+ */
+
+/* Match an individual register number and assign to \var */
+#define _IFC_REG(n) \
+ ".ifc \\r, $" #n "\n\t" \
+ "\\var = " #n "\n\t" \
+ ".endif\n\t"
+
+__asm__(".macro parse_r var r\n\t"
+ "\\var = -1\n\t"
+ _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3)
+ _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7)
+ _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11)
+ _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15)
+ _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19)
+ _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23)
+ _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27)
+ _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)
+ ".iflt \\var\n\t"
+ ".error \"Unable to parse register name \\r\"\n\t"
+ ".endif\n\t"
+ ".endm");
+
+#undef _IFC_REG
+
+/*
+ * C macros for generating assembler macros for common instruction formats.
+ *
+ * The names of the operands can be chosen by the caller, and the encoding of
+ * register operand \<Rn> is assigned to __<Rn> where it can be accessed from
+ * the ENC encodings.
+ */
+
+/* Instructions with no operands */
+#define _ASM_MACRO_0(OP, ENC) \
+ __asm__(".macro " #OP "\n\t" \
+ ENC \
+ ".endm")
+
+/* Instructions with 1 register operand & 1 immediate operand */
+#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #I2 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ ENC \
+ ".endm")
+
+/* Instructions with 2 register operands */
+#define _ASM_MACRO_2R(OP, R1, R2, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #R2 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ "parse_r __" #R2 ", \\" #R2 "\n\t" \
+ ENC \
+ ".endm")
+
+/* Instructions with 3 register operands */
+#define _ASM_MACRO_3R(OP, R1, R2, R3, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #R2 ", " #R3 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ "parse_r __" #R2 ", \\" #R2 "\n\t" \
+ "parse_r __" #R3 ", \\" #R3 "\n\t" \
+ ENC \
+ ".endm")
+
+/* Instructions with 2 register operands and 1 optional select operand */
+#define _ASM_MACRO_2R_1S(OP, R1, R2, SEL3, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #R2 ", " #SEL3 " = 0\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ "parse_r __" #R2 ", \\" #R2 "\n\t" \
+ ENC \
+ ".endm")
+
+/*
* TLB Invalidate Flush
*/
static inline void tlbinvf(void)
@@ -855,7 +1255,9 @@ static inline void tlbinvf(void)
__asm__ __volatile__(
".set push\n\t"
".set noreorder\n\t"
- ".word 0x42000004\n\t" /* tlbinvf */
+ "# tlbinvf\n\t"
+ _ASM_INSN_IF_MIPS(0x42000004)
+ _ASM_INSN32_IF_MM(0x0000537c)
".set pop");
}
@@ -910,40 +1312,55 @@ do { \
* Macros to access the system control coprocessor
*/
-#define __read_32bit_c0_register(source, sel) \
+#define ___read_32bit_c0_register(source, sel, vol) \
({ unsigned int __res; \
if (sel == 0) \
- __asm__ __volatile__( \
+ __asm__ vol( \
"mfc0\t%0, " #source "\n\t" \
: "=r" (__res)); \
else \
- __asm__ __volatile__( \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips32\n\t" \
"mfc0\t%0, " #source ", " #sel "\n\t" \
- ".set\tmips0\n\t" \
+ ".set\tpop\n\t" \
: "=r" (__res)); \
__res; \
})
-#define __read_64bit_c0_register(source, sel) \
+#define ___read_64bit_c0_register(source, sel, vol) \
({ unsigned long long __res; \
if (sizeof(unsigned long) == 4) \
- __res = __read_64bit_c0_split(source, sel); \
+ __res = __read_64bit_c0_split(source, sel, vol); \
else if (sel == 0) \
- __asm__ __volatile__( \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips3\n\t" \
"dmfc0\t%0, " #source "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: "=r" (__res)); \
else \
- __asm__ __volatile__( \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dmfc0\t%0, " #source ", " #sel "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: "=r" (__res)); \
__res; \
})
+#define __read_32bit_c0_register(source, sel) \
+ ___read_32bit_c0_register(source, sel, __volatile__)
+
+#define __read_const_32bit_c0_register(source, sel) \
+ ___read_32bit_c0_register(source, sel,)
+
+#define __read_64bit_c0_register(source, sel) \
+ ___read_64bit_c0_register(source, sel, __volatile__)
+
+#define __read_const_64bit_c0_register(source, sel) \
+ ___read_64bit_c0_register(source, sel,)
+
#define __write_32bit_c0_register(register, sel, value) \
do { \
if (sel == 0) \
@@ -952,9 +1369,10 @@ do { \
: : "Jr" ((unsigned int)(value))); \
else \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips32\n\t" \
"mtc0\t%z0, " #register ", " #sel "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: : "Jr" ((unsigned int)(value))); \
} while (0)
@@ -964,15 +1382,17 @@ do { \
__write_64bit_c0_split(register, sel, value); \
else if (sel == 0) \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips3\n\t" \
"dmtc0\t%z0, " #register "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: : "Jr" (value)); \
else \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dmtc0\t%z0, " #register ", " #sel "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: : "Jr" (value)); \
} while (0)
@@ -981,6 +1401,11 @@ do { \
(unsigned long) __read_32bit_c0_register(reg, sel) : \
(unsigned long) __read_64bit_c0_register(reg, sel))
+#define __read_const_ulong_c0_register(reg, sel) \
+ ((sizeof(unsigned long) == 4) ? \
+ (unsigned long) __read_const_32bit_c0_register(reg, sel) : \
+ (unsigned long) __read_const_64bit_c0_register(reg, sel))
+
#define __write_ulong_c0_register(reg, sel, val) \
do { \
if (sizeof(unsigned long) == 4) \
@@ -1011,27 +1436,27 @@ do { \
* These versions are only needed for systems with more than 38 bits of
* physical address space running the 32-bit kernel. That's none atm :-)
*/
-#define __read_64bit_c0_split(source, sel) \
+#define __read_64bit_c0_split(source, sel, vol) \
({ \
unsigned long long __val; \
\
if (sel == 0) \
- __asm__ __volatile__( \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
- "dmfc0\t%M0, " #source "\n\t" \
- "dsll\t%L0, %M0, 32\n\t" \
- "dsra\t%M0, %M0, 32\n\t" \
- "dsra\t%L0, %L0, 32\n\t" \
- ".set\tmips0" \
+ "dmfc0\t%L0, " #source "\n\t" \
+ "dsra\t%M0, %L0, 32\n\t" \
+ "sll\t%L0, %L0, 0\n\t" \
+ ".set\tpop" \
: "=r" (__val)); \
else \
- __asm__ __volatile__( \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
- "dmfc0\t%M0, " #source ", " #sel "\n\t" \
- "dsll\t%L0, %M0, 32\n\t" \
- "dsra\t%M0, %M0, 32\n\t" \
- "dsra\t%L0, %L0, 32\n\t" \
- ".set\tmips0" \
+ "dmfc0\t%L0, " #source ", " #sel "\n\t" \
+ "dsra\t%M0, %L0, 32\n\t" \
+ "sll\t%L0, %L0, 0\n\t" \
+ ".set\tpop" \
: "=r" (__val)); \
\
__val; \
@@ -1039,60 +1464,77 @@ do { \
#define __write_64bit_c0_split(source, sel, val) \
do { \
- if (sel == 0) \
+ unsigned long long __tmp = (val); \
+ if (MIPS_ISA_REV >= 2) \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\t" MIPS_ISA_LEVEL "\n\t" \
+ "dins\t%L0, %M0, 32, 32\n\t" \
+ "dmtc0\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source "\n\t" \
- ".set\tmips0" \
- : : "r" (val)); \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
else \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source ", " #sel "\n\t" \
- ".set\tmips0" \
- : : "r" (val)); \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
} while (0)
-#define __readx_32bit_c0_register(source) \
+#ifndef TOOLCHAIN_SUPPORTS_XPA
+_ASM_MACRO_2R_1S(mfhc0, rt, rs, sel,
+ _ASM_INSN_IF_MIPS(0x40400000 | __rt << 16 | __rs << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x000000f4 | __rt << 21 | __rs << 16 | \\sel << 11));
+_ASM_MACRO_2R_1S(mthc0, rt, rd, sel,
+ _ASM_INSN_IF_MIPS(0x40c00000 | __rt << 16 | __rd << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x000002f4 | __rt << 21 | __rd << 16 | \\sel << 11));
+#define _ASM_SET_XPA ""
+#else /* !TOOLCHAIN_SUPPORTS_XPA */
+#define _ASM_SET_XPA ".set\txpa\n\t"
+#endif
+
+#define __readx_32bit_c0_register(source, sel) \
({ \
unsigned int __res; \
\
__asm__ __volatile__( \
" .set push \n" \
- " .set noat \n" \
" .set mips32r2 \n" \
- " .insn \n" \
- " # mfhc0 $1, %1 \n" \
- " .word (0x40410000 | ((%1 & 0x1f) << 11)) \n" \
- " move %0, $1 \n" \
+ _ASM_SET_XPA \
+ " mfhc0 %0, " #source ", %1 \n" \
" .set pop \n" \
: "=r" (__res) \
- : "i" (source)); \
+ : "i" (sel)); \
__res; \
})
-#define __writex_32bit_c0_register(register, value) \
-({ \
+#define __writex_32bit_c0_register(register, sel, value) \
+do { \
__asm__ __volatile__( \
" .set push \n" \
- " .set noat \n" \
" .set mips32r2 \n" \
- " move $1, %0 \n" \
- " # mthc0 $1, %1 \n" \
- " .insn \n" \
- " .word (0x40c10000 | ((%1 & 0x1f) << 11)) \n" \
+ _ASM_SET_XPA \
+ " mthc0 %z0, " #register ", %1 \n" \
" .set pop \n" \
: \
- : "r" (value), "i" (register)); \
-})
+ : "Jr" (value), "i" (sel)); \
+} while (0)
#define read_c0_index() __read_32bit_c0_register($0, 0)
#define write_c0_index(val) __write_32bit_c0_register($0, 0, val)
@@ -1103,24 +1545,35 @@ do { \
#define read_c0_entrylo0() __read_ulong_c0_register($2, 0)
#define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val)
-#define readx_c0_entrylo0() __readx_32bit_c0_register(2)
-#define writex_c0_entrylo0(val) __writex_32bit_c0_register(2, val)
+#define readx_c0_entrylo0() __readx_32bit_c0_register($2, 0)
+#define writex_c0_entrylo0(val) __writex_32bit_c0_register($2, 0, val)
#define read_c0_entrylo1() __read_ulong_c0_register($3, 0)
#define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val)
-#define readx_c0_entrylo1() __readx_32bit_c0_register(3)
-#define writex_c0_entrylo1(val) __writex_32bit_c0_register(3, val)
+#define readx_c0_entrylo1() __readx_32bit_c0_register($3, 0)
+#define writex_c0_entrylo1(val) __writex_32bit_c0_register($3, 0, val)
#define read_c0_conf() __read_32bit_c0_register($3, 0)
#define write_c0_conf(val) __write_32bit_c0_register($3, 0, val)
+#define read_c0_globalnumber() __read_32bit_c0_register($3, 1)
+
#define read_c0_context() __read_ulong_c0_register($4, 0)
#define write_c0_context(val) __write_ulong_c0_register($4, 0, val)
+#define read_c0_contextconfig() __read_32bit_c0_register($4, 1)
+#define write_c0_contextconfig(val) __write_32bit_c0_register($4, 1, val)
+
#define read_c0_userlocal() __read_ulong_c0_register($4, 2)
#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
+#define read_c0_xcontextconfig() __read_ulong_c0_register($4, 3)
+#define write_c0_xcontextconfig(val) __write_ulong_c0_register($4, 3, val)
+
+#define read_c0_memorymapid() __read_32bit_c0_register($4, 5)
+#define write_c0_memorymapid(val) __write_32bit_c0_register($4, 5, val)
+
#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)
@@ -1138,6 +1591,9 @@ do { \
#define read_c0_badvaddr() __read_ulong_c0_register($8, 0)
#define write_c0_badvaddr(val) __write_ulong_c0_register($8, 0, val)
+#define read_c0_badinstr() __read_32bit_c0_register($8, 1)
+#define read_c0_badinstrp() __read_32bit_c0_register($8, 2)
+
#define read_c0_count() __read_32bit_c0_register($9, 0)
#define write_c0_count(val) __write_32bit_c0_register($9, 0, val)
@@ -1150,9 +1606,21 @@ do { \
#define read_c0_entryhi() __read_ulong_c0_register($10, 0)
#define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
+#define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+#define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+
+#define read_c0_guestctl2() __read_32bit_c0_register($10, 5)
+#define write_c0_guestctl2(val) __write_32bit_c0_register($10, 5, val)
+
+#define read_c0_guestctl3() __read_32bit_c0_register($10, 6)
+#define write_c0_guestctl3(val) __write_32bit_c0_register($10, 6, val)
+
#define read_c0_compare() __read_32bit_c0_register($11, 0)
#define write_c0_compare(val) __write_32bit_c0_register($11, 0, val)
+#define read_c0_guestctl0ext() __read_32bit_c0_register($11, 4)
+#define write_c0_guestctl0ext(val) __write_32bit_c0_register($11, 4, val)
+
#define read_c0_compare2() __read_32bit_c0_register($11, 6) /* pnx8550 */
#define write_c0_compare2(val) __write_32bit_c0_register($11, 6, val)
@@ -1163,13 +1631,19 @@ do { \
#define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
+#define read_c0_guestctl0() __read_32bit_c0_register($12, 6)
+#define write_c0_guestctl0(val) __write_32bit_c0_register($12, 6, val)
+
+#define read_c0_gtoffset() __read_32bit_c0_register($12, 7)
+#define write_c0_gtoffset(val) __write_32bit_c0_register($12, 7, val)
+
#define read_c0_cause() __read_32bit_c0_register($13, 0)
#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
#define read_c0_epc() __read_ulong_c0_register($14, 0)
#define write_c0_epc(val) __write_ulong_c0_register($14, 0, val)
-#define read_c0_prid() __read_32bit_c0_register($15, 0)
+#define read_c0_prid() __read_const_32bit_c0_register($15, 0)
#define read_c0_cmgcrbase() __read_ulong_c0_register($15, 3)
@@ -1348,6 +1822,9 @@ do { \
#define read_c0_ebase() __read_32bit_c0_register($15, 1)
#define write_c0_ebase(val) __write_32bit_c0_register($15, 1, val)
+#define read_c0_ebase_64() __read_64bit_c0_register($15, 1)
+#define write_c0_ebase_64(val) __write_64bit_c0_register($15, 1, val)
+
#define read_c0_cdmmbase() __read_ulong_c0_register($15, 2)
#define write_c0_cdmmbase(val) __write_ulong_c0_register($15, 2, val)
@@ -1374,6 +1851,12 @@ do { \
#define read_c0_pwctl() __read_32bit_c0_register($6, 6)
#define write_c0_pwctl(val) __write_32bit_c0_register($6, 6, val)
+#define read_c0_pgd() __read_64bit_c0_register($9, 7)
+#define write_c0_pgd(val) __write_64bit_c0_register($9, 7, val)
+
+#define read_c0_kpgd() __read_64bit_c0_register($31, 7)
+#define write_c0_kpgd(val) __write_64bit_c0_register($31, 7, val)
+
/* Cavium OCTEON (cnMIPS) */
#define read_c0_cvmcount() __read_ulong_c0_register($9, 6)
#define write_c0_cvmcount(val) __write_ulong_c0_register($9, 6, val)
@@ -1383,6 +1866,13 @@ do { \
#define read_c0_cvmmemctl() __read_64bit_c0_register($11, 7)
#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
+
+#define read_c0_cvmmemctl2() __read_64bit_c0_register($16, 6)
+#define write_c0_cvmmemctl2(val) __write_64bit_c0_register($16, 6, val)
+
+#define read_c0_cvmvmconfig() __read_64bit_c0_register($16, 7)
+#define write_c0_cvmvmconfig(val) __write_64bit_c0_register($16, 7, val)
+
/*
* The cacheerr registers are not standardized. On OCTEON, they are
* 64 bits wide.
@@ -1438,6 +1928,306 @@ do { \
#define read_c0_brcm_sleepcount() __read_32bit_c0_register($22, 7)
#define write_c0_brcm_sleepcount(val) __write_32bit_c0_register($22, 7, val)
+/* Ingenic page ctrl register */
+#define write_c0_page_ctrl(val) __write_32bit_c0_register($5, 4, val)
+
+/*
+ * Macros to access the guest system control coprocessor
+ */
+
+#ifndef TOOLCHAIN_SUPPORTS_VIRT
+_ASM_MACRO_2R_1S(mfgc0, rt, rs, sel,
+ _ASM_INSN_IF_MIPS(0x40600000 | __rt << 16 | __rs << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x000004fc | __rt << 21 | __rs << 16 | \\sel << 11));
+_ASM_MACRO_2R_1S(dmfgc0, rt, rs, sel,
+ _ASM_INSN_IF_MIPS(0x40600100 | __rt << 16 | __rs << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x580004fc | __rt << 21 | __rs << 16 | \\sel << 11));
+_ASM_MACRO_2R_1S(mtgc0, rt, rd, sel,
+ _ASM_INSN_IF_MIPS(0x40600200 | __rt << 16 | __rd << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x000006fc | __rt << 21 | __rd << 16 | \\sel << 11));
+_ASM_MACRO_2R_1S(dmtgc0, rt, rd, sel,
+ _ASM_INSN_IF_MIPS(0x40600300 | __rt << 16 | __rd << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x580006fc | __rt << 21 | __rd << 16 | \\sel << 11));
+_ASM_MACRO_0(tlbgp, _ASM_INSN_IF_MIPS(0x42000010)
+ _ASM_INSN32_IF_MM(0x0000017c));
+_ASM_MACRO_0(tlbgr, _ASM_INSN_IF_MIPS(0x42000009)
+ _ASM_INSN32_IF_MM(0x0000117c));
+_ASM_MACRO_0(tlbgwi, _ASM_INSN_IF_MIPS(0x4200000a)
+ _ASM_INSN32_IF_MM(0x0000217c));
+_ASM_MACRO_0(tlbgwr, _ASM_INSN_IF_MIPS(0x4200000e)
+ _ASM_INSN32_IF_MM(0x0000317c));
+_ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
+ _ASM_INSN32_IF_MM(0x0000517c));
+#define _ASM_SET_VIRT ""
+#else /* !TOOLCHAIN_SUPPORTS_VIRT */
+#define _ASM_SET_VIRT ".set\tvirt\n\t"
+#endif
+
+#define __read_32bit_gc0_register(source, sel) \
+({ int __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips32r2\n\t" \
+ _ASM_SET_VIRT \
+ "mfgc0\t%0, " #source ", %1\n\t" \
+ ".set\tpop" \
+ : "=r" (__res) \
+ : "i" (sel)); \
+ __res; \
+})
+
+#define __read_64bit_gc0_register(source, sel) \
+({ unsigned long long __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64r2\n\t" \
+ _ASM_SET_VIRT \
+ "dmfgc0\t%0, " #source ", %1\n\t" \
+ ".set\tpop" \
+ : "=r" (__res) \
+ : "i" (sel)); \
+ __res; \
+})
+
+#define __write_32bit_gc0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips32r2\n\t" \
+ _ASM_SET_VIRT \
+ "mtgc0\t%z0, " #register ", %1\n\t" \
+ ".set\tpop" \
+ : : "Jr" ((unsigned int)(value)), \
+ "i" (sel)); \
+} while (0)
+
+#define __write_64bit_gc0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64r2\n\t" \
+ _ASM_SET_VIRT \
+ "dmtgc0\t%z0, " #register ", %1\n\t" \
+ ".set\tpop" \
+ : : "Jr" (value), \
+ "i" (sel)); \
+} while (0)
+
+#define __read_ulong_gc0_register(reg, sel) \
+ ((sizeof(unsigned long) == 4) ? \
+ (unsigned long) __read_32bit_gc0_register(reg, sel) : \
+ (unsigned long) __read_64bit_gc0_register(reg, sel))
+
+#define __write_ulong_gc0_register(reg, sel, val) \
+do { \
+ if (sizeof(unsigned long) == 4) \
+ __write_32bit_gc0_register(reg, sel, val); \
+ else \
+ __write_64bit_gc0_register(reg, sel, val); \
+} while (0)
+
+#define read_gc0_index() __read_32bit_gc0_register($0, 0)
+#define write_gc0_index(val) __write_32bit_gc0_register($0, 0, val)
+
+#define read_gc0_entrylo0() __read_ulong_gc0_register($2, 0)
+#define write_gc0_entrylo0(val) __write_ulong_gc0_register($2, 0, val)
+
+#define read_gc0_entrylo1() __read_ulong_gc0_register($3, 0)
+#define write_gc0_entrylo1(val) __write_ulong_gc0_register($3, 0, val)
+
+#define read_gc0_context() __read_ulong_gc0_register($4, 0)
+#define write_gc0_context(val) __write_ulong_gc0_register($4, 0, val)
+
+#define read_gc0_contextconfig() __read_32bit_gc0_register($4, 1)
+#define write_gc0_contextconfig(val) __write_32bit_gc0_register($4, 1, val)
+
+#define read_gc0_userlocal() __read_ulong_gc0_register($4, 2)
+#define write_gc0_userlocal(val) __write_ulong_gc0_register($4, 2, val)
+
+#define read_gc0_xcontextconfig() __read_ulong_gc0_register($4, 3)
+#define write_gc0_xcontextconfig(val) __write_ulong_gc0_register($4, 3, val)
+
+#define read_gc0_pagemask() __read_32bit_gc0_register($5, 0)
+#define write_gc0_pagemask(val) __write_32bit_gc0_register($5, 0, val)
+
+#define read_gc0_pagegrain() __read_32bit_gc0_register($5, 1)
+#define write_gc0_pagegrain(val) __write_32bit_gc0_register($5, 1, val)
+
+#define read_gc0_segctl0() __read_ulong_gc0_register($5, 2)
+#define write_gc0_segctl0(val) __write_ulong_gc0_register($5, 2, val)
+
+#define read_gc0_segctl1() __read_ulong_gc0_register($5, 3)
+#define write_gc0_segctl1(val) __write_ulong_gc0_register($5, 3, val)
+
+#define read_gc0_segctl2() __read_ulong_gc0_register($5, 4)
+#define write_gc0_segctl2(val) __write_ulong_gc0_register($5, 4, val)
+
+#define read_gc0_pwbase() __read_ulong_gc0_register($5, 5)
+#define write_gc0_pwbase(val) __write_ulong_gc0_register($5, 5, val)
+
+#define read_gc0_pwfield() __read_ulong_gc0_register($5, 6)
+#define write_gc0_pwfield(val) __write_ulong_gc0_register($5, 6, val)
+
+#define read_gc0_pwsize() __read_ulong_gc0_register($5, 7)
+#define write_gc0_pwsize(val) __write_ulong_gc0_register($5, 7, val)
+
+#define read_gc0_wired() __read_32bit_gc0_register($6, 0)
+#define write_gc0_wired(val) __write_32bit_gc0_register($6, 0, val)
+
+#define read_gc0_pwctl() __read_32bit_gc0_register($6, 6)
+#define write_gc0_pwctl(val) __write_32bit_gc0_register($6, 6, val)
+
+#define read_gc0_hwrena() __read_32bit_gc0_register($7, 0)
+#define write_gc0_hwrena(val) __write_32bit_gc0_register($7, 0, val)
+
+#define read_gc0_badvaddr() __read_ulong_gc0_register($8, 0)
+#define write_gc0_badvaddr(val) __write_ulong_gc0_register($8, 0, val)
+
+#define read_gc0_badinstr() __read_32bit_gc0_register($8, 1)
+#define write_gc0_badinstr(val) __write_32bit_gc0_register($8, 1, val)
+
+#define read_gc0_badinstrp() __read_32bit_gc0_register($8, 2)
+#define write_gc0_badinstrp(val) __write_32bit_gc0_register($8, 2, val)
+
+#define read_gc0_count() __read_32bit_gc0_register($9, 0)
+
+#define read_gc0_entryhi() __read_ulong_gc0_register($10, 0)
+#define write_gc0_entryhi(val) __write_ulong_gc0_register($10, 0, val)
+
+#define read_gc0_compare() __read_32bit_gc0_register($11, 0)
+#define write_gc0_compare(val) __write_32bit_gc0_register($11, 0, val)
+
+#define read_gc0_status() __read_32bit_gc0_register($12, 0)
+#define write_gc0_status(val) __write_32bit_gc0_register($12, 0, val)
+
+#define read_gc0_intctl() __read_32bit_gc0_register($12, 1)
+#define write_gc0_intctl(val) __write_32bit_gc0_register($12, 1, val)
+
+#define read_gc0_cause() __read_32bit_gc0_register($13, 0)
+#define write_gc0_cause(val) __write_32bit_gc0_register($13, 0, val)
+
+#define read_gc0_epc() __read_ulong_gc0_register($14, 0)
+#define write_gc0_epc(val) __write_ulong_gc0_register($14, 0, val)
+
+#define read_gc0_prid() __read_32bit_gc0_register($15, 0)
+
+#define read_gc0_ebase() __read_32bit_gc0_register($15, 1)
+#define write_gc0_ebase(val) __write_32bit_gc0_register($15, 1, val)
+
+#define read_gc0_ebase_64() __read_64bit_gc0_register($15, 1)
+#define write_gc0_ebase_64(val) __write_64bit_gc0_register($15, 1, val)
+
+#define read_gc0_config() __read_32bit_gc0_register($16, 0)
+#define read_gc0_config1() __read_32bit_gc0_register($16, 1)
+#define read_gc0_config2() __read_32bit_gc0_register($16, 2)
+#define read_gc0_config3() __read_32bit_gc0_register($16, 3)
+#define read_gc0_config4() __read_32bit_gc0_register($16, 4)
+#define read_gc0_config5() __read_32bit_gc0_register($16, 5)
+#define read_gc0_config6() __read_32bit_gc0_register($16, 6)
+#define read_gc0_config7() __read_32bit_gc0_register($16, 7)
+#define write_gc0_config(val) __write_32bit_gc0_register($16, 0, val)
+#define write_gc0_config1(val) __write_32bit_gc0_register($16, 1, val)
+#define write_gc0_config2(val) __write_32bit_gc0_register($16, 2, val)
+#define write_gc0_config3(val) __write_32bit_gc0_register($16, 3, val)
+#define write_gc0_config4(val) __write_32bit_gc0_register($16, 4, val)
+#define write_gc0_config5(val) __write_32bit_gc0_register($16, 5, val)
+#define write_gc0_config6(val) __write_32bit_gc0_register($16, 6, val)
+#define write_gc0_config7(val) __write_32bit_gc0_register($16, 7, val)
+
+#define read_gc0_lladdr() __read_ulong_gc0_register($17, 0)
+#define write_gc0_lladdr(val) __write_ulong_gc0_register($17, 0, val)
+
+#define read_gc0_watchlo0() __read_ulong_gc0_register($18, 0)
+#define read_gc0_watchlo1() __read_ulong_gc0_register($18, 1)
+#define read_gc0_watchlo2() __read_ulong_gc0_register($18, 2)
+#define read_gc0_watchlo3() __read_ulong_gc0_register($18, 3)
+#define read_gc0_watchlo4() __read_ulong_gc0_register($18, 4)
+#define read_gc0_watchlo5() __read_ulong_gc0_register($18, 5)
+#define read_gc0_watchlo6() __read_ulong_gc0_register($18, 6)
+#define read_gc0_watchlo7() __read_ulong_gc0_register($18, 7)
+#define write_gc0_watchlo0(val) __write_ulong_gc0_register($18, 0, val)
+#define write_gc0_watchlo1(val) __write_ulong_gc0_register($18, 1, val)
+#define write_gc0_watchlo2(val) __write_ulong_gc0_register($18, 2, val)
+#define write_gc0_watchlo3(val) __write_ulong_gc0_register($18, 3, val)
+#define write_gc0_watchlo4(val) __write_ulong_gc0_register($18, 4, val)
+#define write_gc0_watchlo5(val) __write_ulong_gc0_register($18, 5, val)
+#define write_gc0_watchlo6(val) __write_ulong_gc0_register($18, 6, val)
+#define write_gc0_watchlo7(val) __write_ulong_gc0_register($18, 7, val)
+
+#define read_gc0_watchhi0() __read_32bit_gc0_register($19, 0)
+#define read_gc0_watchhi1() __read_32bit_gc0_register($19, 1)
+#define read_gc0_watchhi2() __read_32bit_gc0_register($19, 2)
+#define read_gc0_watchhi3() __read_32bit_gc0_register($19, 3)
+#define read_gc0_watchhi4() __read_32bit_gc0_register($19, 4)
+#define read_gc0_watchhi5() __read_32bit_gc0_register($19, 5)
+#define read_gc0_watchhi6() __read_32bit_gc0_register($19, 6)
+#define read_gc0_watchhi7() __read_32bit_gc0_register($19, 7)
+#define write_gc0_watchhi0(val) __write_32bit_gc0_register($19, 0, val)
+#define write_gc0_watchhi1(val) __write_32bit_gc0_register($19, 1, val)
+#define write_gc0_watchhi2(val) __write_32bit_gc0_register($19, 2, val)
+#define write_gc0_watchhi3(val) __write_32bit_gc0_register($19, 3, val)
+#define write_gc0_watchhi4(val) __write_32bit_gc0_register($19, 4, val)
+#define write_gc0_watchhi5(val) __write_32bit_gc0_register($19, 5, val)
+#define write_gc0_watchhi6(val) __write_32bit_gc0_register($19, 6, val)
+#define write_gc0_watchhi7(val) __write_32bit_gc0_register($19, 7, val)
+
+#define read_gc0_xcontext() __read_ulong_gc0_register($20, 0)
+#define write_gc0_xcontext(val) __write_ulong_gc0_register($20, 0, val)
+
+#define read_gc0_perfctrl0() __read_32bit_gc0_register($25, 0)
+#define write_gc0_perfctrl0(val) __write_32bit_gc0_register($25, 0, val)
+#define read_gc0_perfcntr0() __read_32bit_gc0_register($25, 1)
+#define write_gc0_perfcntr0(val) __write_32bit_gc0_register($25, 1, val)
+#define read_gc0_perfcntr0_64() __read_64bit_gc0_register($25, 1)
+#define write_gc0_perfcntr0_64(val) __write_64bit_gc0_register($25, 1, val)
+#define read_gc0_perfctrl1() __read_32bit_gc0_register($25, 2)
+#define write_gc0_perfctrl1(val) __write_32bit_gc0_register($25, 2, val)
+#define read_gc0_perfcntr1() __read_32bit_gc0_register($25, 3)
+#define write_gc0_perfcntr1(val) __write_32bit_gc0_register($25, 3, val)
+#define read_gc0_perfcntr1_64() __read_64bit_gc0_register($25, 3)
+#define write_gc0_perfcntr1_64(val) __write_64bit_gc0_register($25, 3, val)
+#define read_gc0_perfctrl2() __read_32bit_gc0_register($25, 4)
+#define write_gc0_perfctrl2(val) __write_32bit_gc0_register($25, 4, val)
+#define read_gc0_perfcntr2() __read_32bit_gc0_register($25, 5)
+#define write_gc0_perfcntr2(val) __write_32bit_gc0_register($25, 5, val)
+#define read_gc0_perfcntr2_64() __read_64bit_gc0_register($25, 5)
+#define write_gc0_perfcntr2_64(val) __write_64bit_gc0_register($25, 5, val)
+#define read_gc0_perfctrl3() __read_32bit_gc0_register($25, 6)
+#define write_gc0_perfctrl3(val) __write_32bit_gc0_register($25, 6, val)
+#define read_gc0_perfcntr3() __read_32bit_gc0_register($25, 7)
+#define write_gc0_perfcntr3(val) __write_32bit_gc0_register($25, 7, val)
+#define read_gc0_perfcntr3_64() __read_64bit_gc0_register($25, 7)
+#define write_gc0_perfcntr3_64(val) __write_64bit_gc0_register($25, 7, val)
+
+#define read_gc0_errorepc() __read_ulong_gc0_register($30, 0)
+#define write_gc0_errorepc(val) __write_ulong_gc0_register($30, 0, val)
+
+#define read_gc0_kscratch1() __read_ulong_gc0_register($31, 2)
+#define read_gc0_kscratch2() __read_ulong_gc0_register($31, 3)
+#define read_gc0_kscratch3() __read_ulong_gc0_register($31, 4)
+#define read_gc0_kscratch4() __read_ulong_gc0_register($31, 5)
+#define read_gc0_kscratch5() __read_ulong_gc0_register($31, 6)
+#define read_gc0_kscratch6() __read_ulong_gc0_register($31, 7)
+#define write_gc0_kscratch1(val) __write_ulong_gc0_register($31, 2, val)
+#define write_gc0_kscratch2(val) __write_ulong_gc0_register($31, 3, val)
+#define write_gc0_kscratch3(val) __write_ulong_gc0_register($31, 4, val)
+#define write_gc0_kscratch4(val) __write_ulong_gc0_register($31, 5, val)
+#define write_gc0_kscratch5(val) __write_ulong_gc0_register($31, 6, val)
+#define write_gc0_kscratch6(val) __write_ulong_gc0_register($31, 7, val)
+
+/* Cavium OCTEON (cnMIPS) */
+#define read_gc0_cvmcount() __read_ulong_gc0_register($9, 6)
+#define write_gc0_cvmcount(val) __write_ulong_gc0_register($9, 6, val)
+
+#define read_gc0_cvmctl() __read_64bit_gc0_register($9, 7)
+#define write_gc0_cvmctl(val) __write_64bit_gc0_register($9, 7, val)
+
+#define read_gc0_cvmmemctl() __read_64bit_gc0_register($11, 7)
+#define write_gc0_cvmmemctl(val) __write_64bit_gc0_register($11, 7, val)
+
+#define read_gc0_cvmmemctl2() __read_64bit_gc0_register($16, 6)
+#define write_gc0_cvmmemctl2(val) __write_64bit_gc0_register($16, 6, val)
+
/*
* Macros to access the floating point coprocessor control registers
*/
@@ -1459,7 +2249,7 @@ do { \
})
#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \
-({ \
+do { \
__asm__ __volatile__( \
" .set push \n" \
" .set reorder \n" \
@@ -1467,7 +2257,7 @@ do { \
" ctc1 %0,"STR(dest)" \n" \
" .set pop \n" \
: : "r" (val)); \
-})
+} while (0)
#ifdef GAS_HAS_SET_HARDFLOAT
#define read_32bit_cp1_register(source) \
@@ -1481,13 +2271,14 @@ do { \
_write_32bit_cp1_register(dest, val, )
#endif
-#ifdef HAVE_AS_DSP
+#ifdef TOOLCHAIN_SUPPORTS_DSP
#define rddsp(mask) \
({ \
unsigned int __dspctl; \
\
__asm__ __volatile__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" rddsp %0, %x1 \n" \
" .set pop \n" \
@@ -1497,25 +2288,27 @@ do { \
})
#define wrdsp(val, mask) \
-({ \
+do { \
__asm__ __volatile__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" wrdsp %0, %x1 \n" \
" .set pop \n" \
: \
: "r" (val), "i" (mask)); \
-})
+} while (0)
#define mflo0() \
({ \
long mflo0; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac0 \n" \
- " .set pop \n" \
- : "=r" (mflo0)); \
+ " .set pop \n" \
+ : "=r" (mflo0)); \
mflo0; \
})
@@ -1524,10 +2317,11 @@ do { \
long mflo1; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac1 \n" \
- " .set pop \n" \
- : "=r" (mflo1)); \
+ " .set pop \n" \
+ : "=r" (mflo1)); \
mflo1; \
})
@@ -1536,10 +2330,11 @@ do { \
long mflo2; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac2 \n" \
- " .set pop \n" \
- : "=r" (mflo2)); \
+ " .set pop \n" \
+ : "=r" (mflo2)); \
mflo2; \
})
@@ -1548,10 +2343,11 @@ do { \
long mflo3; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac3 \n" \
- " .set pop \n" \
- : "=r" (mflo3)); \
+ " .set pop \n" \
+ : "=r" (mflo3)); \
mflo3; \
})
@@ -1560,10 +2356,11 @@ do { \
long mfhi0; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac0 \n" \
- " .set pop \n" \
- : "=r" (mfhi0)); \
+ " .set pop \n" \
+ : "=r" (mfhi0)); \
mfhi0; \
})
@@ -1572,10 +2369,11 @@ do { \
long mfhi1; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac1 \n" \
- " .set pop \n" \
- : "=r" (mfhi1)); \
+ " .set pop \n" \
+ : "=r" (mfhi1)); \
mfhi1; \
})
@@ -1584,10 +2382,11 @@ do { \
long mfhi2; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac2 \n" \
- " .set pop \n" \
- : "=r" (mfhi2)); \
+ " .set pop \n" \
+ : "=r" (mfhi2)); \
mfhi2; \
})
@@ -1596,10 +2395,11 @@ do { \
long mfhi3; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac3 \n" \
- " .set pop \n" \
- : "=r" (mfhi3)); \
+ " .set pop \n" \
+ : "=r" (mfhi3)); \
mfhi3; \
})
@@ -1608,6 +2408,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac0 \n" \
" .set pop \n" \
@@ -1619,6 +2420,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac1 \n" \
" .set pop \n" \
@@ -1630,6 +2432,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac2 \n" \
" .set pop \n" \
@@ -1641,6 +2444,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac3 \n" \
" .set pop \n" \
@@ -1652,6 +2456,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac0 \n" \
" .set pop \n" \
@@ -1663,6 +2468,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac1 \n" \
" .set pop \n" \
@@ -1674,6 +2480,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac2 \n" \
" .set pop \n" \
@@ -1685,6 +2492,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac3 \n" \
" .set pop \n" \
@@ -1694,7 +2502,6 @@ do { \
#else
-#ifdef CONFIG_CPU_MICROMIPS
#define rddsp(mask) \
({ \
unsigned int __res; \
@@ -1703,8 +2510,8 @@ do { \
" .set push \n" \
" .set noat \n" \
" # rddsp $1, %x1 \n" \
- " .hword ((0x0020067c | (%x1 << 14)) >> 16) \n" \
- " .hword ((0x0020067c | (%x1 << 14)) & 0xffff) \n" \
+ _ASM_INSN_IF_MIPS(0x7c000cb8 | (%x1 << 16)) \
+ _ASM_INSN32_IF_MM(0x0020067c | (%x1 << 14)) \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__res) \
@@ -1713,28 +2520,28 @@ do { \
})
#define wrdsp(val, mask) \
-({ \
+do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
" # wrdsp $1, %x1 \n" \
- " .hword ((0x0020167c | (%x1 << 14)) >> 16) \n" \
- " .hword ((0x0020167c | (%x1 << 14)) & 0xffff) \n" \
+ _ASM_INSN_IF_MIPS(0x7c2004f8 | (%x1 << 11)) \
+ _ASM_INSN32_IF_MM(0x0020167c | (%x1 << 14)) \
" .set pop \n" \
: \
: "r" (val), "i" (mask)); \
-})
+} while (0)
-#define _umips_dsp_mfxxx(ins) \
+#define _dsp_mfxxx(ins) \
({ \
unsigned long __treg; \
\
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .hword 0x0001 \n" \
- " .hword %x1 \n" \
+ _ASM_INSN_IF_MIPS(0x00000810 | %X1) \
+ _ASM_INSN32_IF_MM(0x0001007c | %x1) \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__treg) \
@@ -1742,101 +2549,28 @@ do { \
__treg; \
})
-#define _umips_dsp_mtxxx(val, ins) \
-({ \
+#define _dsp_mtxxx(val, ins) \
+do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " .hword 0x0001 \n" \
- " .hword %x1 \n" \
+ _ASM_INSN_IF_MIPS(0x00200011 | %X1) \
+ _ASM_INSN32_IF_MM(0x0001207c | %x1) \
" .set pop \n" \
: \
: "r" (val), "i" (ins)); \
-})
-
-#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c)
-#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c)
-
-#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c))
-#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c))
-
-#define mflo0() _umips_dsp_mflo(0)
-#define mflo1() _umips_dsp_mflo(1)
-#define mflo2() _umips_dsp_mflo(2)
-#define mflo3() _umips_dsp_mflo(3)
+} while (0)
-#define mfhi0() _umips_dsp_mfhi(0)
-#define mfhi1() _umips_dsp_mfhi(1)
-#define mfhi2() _umips_dsp_mfhi(2)
-#define mfhi3() _umips_dsp_mfhi(3)
+#ifdef CONFIG_CPU_MICROMIPS
-#define mtlo0(x) _umips_dsp_mtlo(x, 0)
-#define mtlo1(x) _umips_dsp_mtlo(x, 1)
-#define mtlo2(x) _umips_dsp_mtlo(x, 2)
-#define mtlo3(x) _umips_dsp_mtlo(x, 3)
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 14) | 0x1000)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 14) | 0x0000)
-#define mthi0(x) _umips_dsp_mthi(x, 0)
-#define mthi1(x) _umips_dsp_mthi(x, 1)
-#define mthi2(x) _umips_dsp_mthi(x, 2)
-#define mthi3(x) _umips_dsp_mthi(x, 3)
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x1000))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x0000))
#else /* !CONFIG_CPU_MICROMIPS */
-#define rddsp(mask) \
-({ \
- unsigned int __res; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # rddsp $1, %x1 \n" \
- " .word 0x7c000cb8 | (%x1 << 16) \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__res) \
- : "i" (mask)); \
- __res; \
-})
-
-#define wrdsp(val, mask) \
-({ \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " # wrdsp $1, %x1 \n" \
- " .word 0x7c2004f8 | (%x1 << 11) \n" \
- " .set pop \n" \
- : \
- : "r" (val), "i" (mask)); \
-})
-
-#define _dsp_mfxxx(ins) \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " .word (0x00000810 | %1) \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg) \
- : "i" (ins)); \
- __treg; \
-})
-
-#define _dsp_mtxxx(val, ins) \
-({ \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " .word (0x00200011 | %1) \n" \
- " .set pop \n" \
- : \
- : "r" (val), "i" (ins)); \
-})
#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
@@ -1844,6 +2578,8 @@ do { \
#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
+#endif /* CONFIG_CPU_MICROMIPS */
+
#define mflo0() _dsp_mflo(0)
#define mflo1() _dsp_mflo(1)
#define mflo2() _dsp_mflo(2)
@@ -1864,7 +2600,6 @@ do { \
#define mthi2(x) _dsp_mthi(x, 2)
#define mthi3(x) _dsp_mthi(x, 3)
-#endif /* CONFIG_CPU_MICROMIPS */
#endif
/*
@@ -1934,54 +2669,123 @@ static inline void tlb_write_random(void)
}
/*
- * Manipulate bits in a c0 register.
+ * Guest TLB operations.
+ *
+ * It is the responsibility of the caller to take care of any TLB hazards.
*/
-#define __BUILD_SET_C0(name) \
+static inline void guest_tlb_probe(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbgp\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_read(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbgr\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_write_indexed(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbgwi\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_write_random(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbgwr\n\t"
+ ".set pop");
+}
+
+/*
+ * Guest TLB Invalidate Flush
+ */
+static inline void guest_tlbinvf(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbginvf\n\t"
+ ".set pop");
+}
+
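For illustration only (this helper is not part of the patch), the gc0 accessors and the guest TLB instructions above compose as follows when writing a single guest TLB entry; hazard handling is omitted because, per the comment above, it is the caller's responsibility:

static inline void guest_tlb_write_entry0(unsigned long hi, unsigned long lo0,
					  unsigned long lo1, unsigned int mask)
{
	write_gc0_index(0);
	write_gc0_pagemask(mask);
	write_gc0_entryhi(hi);
	write_gc0_entrylo0(lo0);
	write_gc0_entrylo1(lo1);
	guest_tlb_write_indexed();	/* caller must handle TLB hazards */
}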
+/*
+ * Manipulate bits in a register.
+ */
+#define __BUILD_SET_COMMON(name) \
static inline unsigned int \
-set_c0_##name(unsigned int set) \
+set_##name(unsigned int set) \
{ \
unsigned int res, new; \
\
- res = read_c0_##name(); \
+ res = read_##name(); \
new = res | set; \
- write_c0_##name(new); \
+ write_##name(new); \
\
return res; \
} \
\
static inline unsigned int \
-clear_c0_##name(unsigned int clear) \
+clear_##name(unsigned int clear) \
{ \
unsigned int res, new; \
\
- res = read_c0_##name(); \
+ res = read_##name(); \
new = res & ~clear; \
- write_c0_##name(new); \
+ write_##name(new); \
\
return res; \
} \
\
static inline unsigned int \
-change_c0_##name(unsigned int change, unsigned int val) \
+change_##name(unsigned int change, unsigned int val) \
{ \
unsigned int res, new; \
\
- res = read_c0_##name(); \
+ res = read_##name(); \
new = res & ~change; \
new |= (val & change); \
- write_c0_##name(new); \
+ write_##name(new); \
\
return res; \
}
+/*
+ * Manipulate bits in a c0 register.
+ */
+#define __BUILD_SET_C0(name) __BUILD_SET_COMMON(c0_##name)
+
__BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
__BUILD_SET_C0(pagegrain)
+__BUILD_SET_C0(guestctl0)
+__BUILD_SET_C0(guestctl0ext)
+__BUILD_SET_C0(guestctl1)
+__BUILD_SET_C0(guestctl2)
+__BUILD_SET_C0(guestctl3)
__BUILD_SET_C0(brcm_config_0)
__BUILD_SET_C0(brcm_bus_pll)
__BUILD_SET_C0(brcm_reset)
@@ -1991,12 +2795,23 @@ __BUILD_SET_C0(brcm_config)
__BUILD_SET_C0(brcm_mode)
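A usage sketch of the generated accessors (the helper below is hypothetical): the set_/clear_/change_ functions all return the previous register value, so a caller can save a field before changing it and later restore exactly that field:

static inline void run_with_bev_cleared(void (*fn)(void))
{
	unsigned int old_status = clear_c0_status(ST0_BEV);

	fn();

	/* Restore only the BEV bit to its previous state */
	change_c0_status(ST0_BEV, old_status);
}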
/*
+ * Manipulate bits in a guest c0 register.
+ */
+#define __BUILD_SET_GC0(name) __BUILD_SET_COMMON(gc0_##name)
+
+__BUILD_SET_GC0(wired)
+__BUILD_SET_GC0(status)
+__BUILD_SET_GC0(cause)
+__BUILD_SET_GC0(ebase)
+__BUILD_SET_GC0(config1)
+
+/*
* Return low 10 bits of ebase.
* Note that under KVM (MIPSVZ) this returns vcpu id.
*/
static inline unsigned int get_ebase_cpunum(void)
{
- return read_c0_ebase() & 0x3ff;
+ return read_c0_ebase() & MIPS_EBASE_CPUNUM;
}
static inline void write_one_tlb(int index, u32 pagemask, u32 hi, u32 low0,
diff --git a/arch/mips/include/asm/relocs.h b/arch/mips/include/asm/relocs.h
index 0987c4bb13..b9b0261f62 100644
--- a/arch/mips/include/asm/relocs.h
+++ b/arch/mips/include/asm/relocs.h
@@ -8,7 +8,7 @@
#ifndef __ASM_MIPS_RELOCS_H__
#define __ASM_MIPS_RELOCS_H__
-#define R_MIPS_NONE 0
+#define R_MIPS_NONE 0xbeef7531
#define R_MIPS_32 2
#define R_MIPS_26 4
#define R_MIPS_HI16 5
diff --git a/arch/mips/lib/bootm.c b/arch/mips/lib/bootm.c
index 82f986cb81..0a13f6edb7 100644
--- a/arch/mips/lib/bootm.c
+++ b/arch/mips/lib/bootm.c
@@ -51,7 +51,7 @@ void arch_lmb_reserve(struct lmb *lmb)
static void linux_cmdline_init(void)
{
linux_argc = 1;
- linux_argv = (char **)UNCACHED_SDRAM(gd->bd->bi_boot_params);
+ linux_argv = (char **)CKSEG1ADDR(gd->bd->bi_boot_params);
linux_argv[0] = 0;
linux_argp = (char *)(linux_argv + LINUX_MAX_ARGS);
}
@@ -186,7 +186,7 @@ static void linux_env_legacy(bootm_headers_t *images)
(ulong)(gd->ram_size >> 20));
}
- rd_start = UNCACHED_SDRAM(images->initrd_start);
+ rd_start = CKSEG1ADDR(images->initrd_start);
rd_size = images->initrd_end - images->initrd_start;
linux_env_init();
diff --git a/arch/mips/lib/cache.c b/arch/mips/lib/cache.c
index 24f115ebc9..cf29994a7a 100644
--- a/arch/mips/lib/cache.c
+++ b/arch/mips/lib/cache.c
@@ -8,9 +8,7 @@
#include <cpu_func.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
-#ifdef CONFIG_MIPS_L2_CACHE
#include <asm/cm.h>
-#endif
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>
@@ -109,7 +107,7 @@ static inline unsigned long scache_line_size(void)
} \
} while (0)
-void flush_cache(ulong start_addr, ulong size)
+void __weak flush_cache(ulong start_addr, ulong size)
{
unsigned long ilsize = icache_line_size();
unsigned long dlsize = dcache_line_size();
@@ -161,7 +159,7 @@ void __weak flush_dcache_range(ulong start_addr, ulong stop)
sync();
}
-void invalidate_dcache_range(ulong start_addr, ulong stop)
+void __weak invalidate_dcache_range(ulong start_addr, ulong stop)
{
unsigned long lsize = dcache_line_size();
unsigned long slsize = scache_line_size();
diff --git a/arch/mips/lib/cache_init.S b/arch/mips/lib/cache_init.S
index cfad1d9c8a..602741c65d 100644
--- a/arch/mips/lib/cache_init.S
+++ b/arch/mips/lib/cache_init.S
@@ -79,6 +79,21 @@
.set pop
.endm
+ /*
+	 * Changing the kernel mode cacheability (Config.K0) must be done from
+	 * KSEG1. If the code is executing from KSEG0, jump to KSEG1 for the
+	 * duration of change_k0_cca; change_k0_cca itself clears all hazards
+	 * when returning.
+ */
+ .macro change_k0_cca_kseg1 mode
+ PTR_LA t0, change_k0_cca
+ li t1, CPHYSADDR(~0)
+ and t0, t0, t1
+ PTR_LI t1, CKSEG1
+ or t0, t0, t1
+ li a0, \mode
+ jalr t0
+ .endm
+
/*
* mips_cache_reset - low level initialisation of the primary caches
*
@@ -317,18 +332,9 @@ l1_init:
sync
/*
- * Enable use of the I-cache by setting Config.K0. The code for this
- * must be executed from KSEG1. Jump from KSEG0 to KSEG1 to do this.
- * Jump back to KSEG0 after caches are enabled and insert an
- * instruction hazard barrier.
+ * Enable use of the I-cache by setting Config.K0.
*/
- PTR_LA t0, change_k0_cca
- li t1, CPHYSADDR(~0)
- and t0, t0, t1
- PTR_LI t1, CKSEG1
- or t0, t0, t1
- li a0, CONF_CM_CACHABLE_NONCOHERENT
- jalr.hb t0
+ change_k0_cca_kseg1 CONF_CM_CACHABLE_NONCOHERENT
/*
* then initialize D-cache.
@@ -388,9 +394,7 @@ l2_unbypass:
beqz t0, 2f
/* Change Config.K0 to a coherent CCA */
- PTR_LA t0, change_k0_cca
- li a0, CONF_CM_CACHABLE_COW
- jalr t0
+ change_k0_cca_kseg1 CONF_CM_CACHABLE_COW
/*
* Join the coherent domain such that the caches of this core are kept
@@ -414,6 +418,12 @@ return:
jr R_RETURN
END(mips_cache_reset)
+LEAF(mips_cache_disable)
+ move R_RETURN, ra
+ change_k0_cca_kseg1 CONF_CM_UNCACHED
+ jr R_RETURN
+ END(mips_cache_disable)
+
LEAF(change_k0_cca)
mfc0 t0, CP0_CONFIG
#if __mips_isa_rev >= 2
diff --git a/arch/mips/lib/reloc.c b/arch/mips/lib/reloc.c
index ffc8c7a1b7..67c8af2f35 100644
--- a/arch/mips/lib/reloc.c
+++ b/arch/mips/lib/reloc.c
@@ -67,7 +67,7 @@ static unsigned long read_uint(uint8_t **buf)
* intentionally simple, and does the bare minimum needed to fixup the
* relocated U-Boot - in particular, it does not check for overflows.
*/
-static void apply_reloc(unsigned int type, void *addr, long off)
+static void apply_reloc(unsigned int type, void *addr, long off, uint8_t *buf)
{
uint32_t u32;
@@ -92,7 +92,8 @@ static void apply_reloc(unsigned int type, void *addr, long off)
break;
default:
- panic("Unhandled reloc type %u\n", type);
+ panic("Unhandled reloc type %u (@ %p), bss used before relocation?\n",
+ type, buf);
}
}
@@ -137,7 +138,7 @@ void relocate_code(ulong start_addr_sp, gd_t *new_gd, ulong relocaddr)
break;
addr += read_uint(&buf) << 2;
- apply_reloc(type, (void *)addr, off);
+ apply_reloc(type, (void *)addr, off, buf);
}
/* Ensure the icache is coherent */
diff --git a/arch/mips/lib/traps.c b/arch/mips/lib/traps.c
index b1ae02fcab..df8b63f383 100644
--- a/arch/mips/lib/traps.c
+++ b/arch/mips/lib/traps.c
@@ -108,6 +108,10 @@ void trap_init(ulong reloc_addr)
saved_ebase = read_c0_ebase() & 0xfffff000;
+ /* Set WG bit on Octeon to enable writing to bits 63:30 */
+ if (IS_ENABLED(CONFIG_ARCH_OCTEON))
+ ebase |= MIPS_EBASE_WG;
+
write_c0_ebase(ebase);
clear_c0_status(ST0_BEV);
execution_hazard_barrier();
diff --git a/arch/mips/mach-octeon/Kconfig b/arch/mips/mach-octeon/Kconfig
new file mode 100644
index 0000000000..e8596ed99a
--- /dev/null
+++ b/arch/mips/mach-octeon/Kconfig
@@ -0,0 +1,60 @@
+menu "Octeon platforms"
+ depends on ARCH_OCTEON
+
+config SYS_SOC
+ string
+ default "octeon"
+
+config OCTEON_CN7XXX
+ bool "Octeon CN7XXX SoC"
+
+config OCTEON_CN70XX
+ bool "Octeon CN70XX SoC"
+ select OCTEON_CN7XXX
+
+config OCTEON_CN73XX
+ bool "Octeon CN73XX SoC"
+ select OCTEON_CN7XXX
+
+config OCTEON_CN78XX
+ bool "Octeon CN78XX SoC"
+ select OCTEON_CN7XXX
+
+choice
+ prompt "Octeon MIPS family select"
+
+config SOC_OCTEON3
+ bool "Octeon III family"
+ help
+	  This selects the Octeon III SoC family: CN70xx, CN73xx, CN78xx
+	  and CNF75xx.
+
+endchoice
+
+choice
+ prompt "Octeon 3 board select"
+ default TARGET_OCTEON_EBB7304
+
+config TARGET_OCTEON_EBB7304
+ bool "Marvell Octeon EBB7304"
+ select OCTEON_CN73XX
+ help
+ Choose this for the Octeon EBB7304 board
+
+endchoice
+
+config SYS_DCACHE_SIZE
+ default 32768
+
+config SYS_DCACHE_LINE_SIZE
+ default 128
+
+config SYS_ICACHE_SIZE
+ default 79872
+
+config SYS_ICACHE_LINE_SIZE
+ default 128
+
+source "board/Marvell/octeon_ebb7304/Kconfig"
+
+endmenu
diff --git a/arch/mips/mach-octeon/Makefile b/arch/mips/mach-octeon/Makefile
new file mode 100644
index 0000000000..2e37ca572c
--- /dev/null
+++ b/arch/mips/mach-octeon/Makefile
@@ -0,0 +1,10 @@
+# (C) Copyright 2019 Marvell, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y += lowlevel_init.o
+obj-y += cache.o
+obj-y += clock.o
+obj-y += cpu.o
+obj-y += dram.o
diff --git a/arch/mips/mach-octeon/cache.c b/arch/mips/mach-octeon/cache.c
new file mode 100644
index 0000000000..9a88bb97c7
--- /dev/null
+++ b/arch/mips/mach-octeon/cache.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <cpu_func.h>
+
+/*
+ * The Octeon platform is cache coherent, so cache flushes and invalidates
+ * are not needed. Define empty platform-specific functions here to
+ * override the __weak common implementations with no-ops. This
+ * effectively disables all cache operations.
+ */
+void flush_dcache_range(ulong start_addr, ulong stop)
+{
+}
+
+void flush_cache(ulong start_addr, ulong size)
+{
+}
+
+void invalidate_dcache_range(ulong start_addr, ulong stop)
+{
+}
diff --git a/arch/mips/mach-octeon/clock.c b/arch/mips/mach-octeon/clock.c
new file mode 100644
index 0000000000..119b3ac50b
--- /dev/null
+++ b/arch/mips/mach-octeon/clock.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, 2019 Marvell International Ltd.
+ */
+
+#include <asm/global_data.h>
+#include <mach/clock.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+ulong notrace get_tbclk(void)
+{
+ return gd->cpu_clk;
+}
diff --git a/arch/mips/mach-octeon/cpu.c b/arch/mips/mach-octeon/cpu.c
new file mode 100644
index 0000000000..2680a2e6ed
--- /dev/null
+++ b/arch/mips/mach-octeon/cpu.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <asm/global_data.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <linux/io.h>
+#include <mach/clock.h>
+#include <mach/cavm-reg.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static int get_clocks(void)
+{
+ const u64 ref_clock = PLL_REF_CLK;
+ void __iomem *rst_boot;
+ u64 val;
+
+ rst_boot = ioremap(CAVM_RST_BOOT, 0);
+ val = ioread64(rst_boot);
+ gd->cpu_clk = ref_clock * FIELD_GET(RST_BOOT_C_MUL, val);
+ gd->bus_clk = ref_clock * FIELD_GET(RST_BOOT_PNR_MUL, val);
+
+ debug("%s: cpu: %lu, bus: %lu\n", __func__, gd->cpu_clk, gd->bus_clk);
+
+ return 0;
+}
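A worked example with hypothetical multiplier values: with the 50 MHz PLL reference clock, RST_BOOT[C_MUL] = 20 and RST_BOOT[PNR_MUL] = 6, get_clocks() would store

	gd->cpu_clk = 50000000 * 20;	/* 1000000000 Hz = 1.0 GHz */
	gd->bus_clk = 50000000 *  6;	/*  300000000 Hz = 300 MHz */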
+
+/* Early mach init code run from flash */
+int mach_cpu_init(void)
+{
+ void __iomem *mio_boot_reg_cfg0;
+
+ /* Remap boot-bus 0x1fc0.0000 -> 0x1f40.0000 */
+ /* ToDo: Move this to an early running bus (bootbus) DM driver */
+ mio_boot_reg_cfg0 = ioremap(CAVM_MIO_BOOT_REG_CFG0, 0);
+ clrsetbits_be64(mio_boot_reg_cfg0, 0xffff, 0x1f40);
+
+ /* Get clocks and store them in GD */
+ get_clocks();
+
+ return 0;
+}
+
+/**
+ * cavm_octeon_num_cores() - Get the number of available CPU cores
+ *
+ * @return number of CPU cores on this SoC, derived from the CIU fuse bits
+ */
+static int cavm_octeon_num_cores(void)
+{
+ void __iomem *ciu_fuse;
+
+ ciu_fuse = ioremap(CAVM_CIU_FUSE, 0);
+ return fls64(ioread64(ciu_fuse) & 0xffffffffffff);
+}
+
+int print_cpuinfo(void)
+{
+ printf("SoC: Octeon CN73xx (%d cores)\n", cavm_octeon_num_cores());
+
+ return 0;
+}
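A quick worked example with a hypothetical fuse value: if CIU_FUSE[47:0] reads back as 0x000000000000ffff, sixteen core fuses are set, fls64() returns 16 and print_cpuinfo() reports "SoC: Octeon CN73xx (16 cores)".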
diff --git a/arch/mips/mach-octeon/dram.c b/arch/mips/mach-octeon/dram.c
new file mode 100644
index 0000000000..ff7a59f2ab
--- /dev/null
+++ b/arch/mips/mach-octeon/dram.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Stefan Roese <sr@denx.de>
+ */
+
+#include <dm.h>
+#include <ram.h>
+#include <asm/global_data.h>
+#include <linux/compat.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int dram_init(void)
+{
+ /*
+ * No DDR init yet -> run in L2 cache
+ */
+ gd->ram_size = (4 << 20);
+ gd->bd->bi_dram[0].size = gd->ram_size;
+ gd->bd->bi_dram[1].size = 0;
+
+ return 0;
+}
+
+ulong board_get_usable_ram_top(ulong total_size)
+{
+ return gd->ram_top;
+}
diff --git a/arch/mips/mach-octeon/include/ioremap.h b/arch/mips/mach-octeon/include/ioremap.h
new file mode 100644
index 0000000000..59b75008a2
--- /dev/null
+++ b/arch/mips/mach-octeon/include/ioremap.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_OCTEON_IOREMAP_H
+#define __ASM_MACH_OCTEON_IOREMAP_H
+
+#include <linux/types.h>
+
+/*
+ * Allow physical addresses to be fixed up to help peripherals located
+ * outside the low 32-bit range -- generic pass-through version.
+ */
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr,
+ phys_addr_t size)
+{
+ return phys_addr;
+}
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+ unsigned long flags)
+{
+ return (void __iomem *)(XKPHYS | offset);
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+ return 0;
+}
+
+#define _page_cachable_default _CACHE_CACHABLE_NONCOHERENT
+
+#endif /* __ASM_MACH_OCTEON_IOREMAP_H */
diff --git a/arch/mips/mach-octeon/include/mach/cavm-reg.h b/arch/mips/mach-octeon/include/mach/cavm-reg.h
new file mode 100644
index 0000000000..45850ea355
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cavm-reg.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __CAVM_REG_H__
+#define __CAVM_REG_H__
+
+/* Register offsets */
+#define CAVM_CIU_FUSE 0x00010100000001a0
+#define CAVM_MIO_BOOT_REG_CFG0 0x0001180000000000
+#define CAVM_RST_BOOT 0x0001180006001600
+
+/* Register bits */
+#define RST_BOOT_C_MUL GENMASK_ULL(36, 30)
+#define RST_BOOT_PNR_MUL GENMASK_ULL(29, 24)
+
+#endif /* __CAVM_REG_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/clock.h b/arch/mips/mach-octeon/include/mach/clock.h
new file mode 100644
index 0000000000..85c8d3dc4f
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/clock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018, 2019 Marvell International Ltd.
+ */
+
+#ifndef __CLOCK_H__
+#define __CLOCK_H__
+
+/** System PLL reference clock */
+#define PLL_REF_CLK 50000000 /* 50 MHz */
+#define NS_PER_REF_CLK_TICK (1000000000 / PLL_REF_CLK)
+
+#endif /* __CLOCK_H__ */
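With the 50 MHz reference clock, NS_PER_REF_CLK_TICK works out to 1000000000 / 50000000 = 20 ns per reference-clock tick.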
diff --git a/arch/mips/mach-octeon/lowlevel_init.S b/arch/mips/mach-octeon/lowlevel_init.S
new file mode 100644
index 0000000000..fa87cb4e34
--- /dev/null
+++ b/arch/mips/mach-octeon/lowlevel_init.S
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Stefan Roese <sr@denx.de>
+ */
+
+#include <config.h>
+#include <asm-offsets.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+
+ .set noreorder
+
+LEAF(lowlevel_init)
+ jr ra
+ nop
+ END(lowlevel_init)
+
+LEAF(mips_mach_early_init)
+
+ move s0, ra
+
+ bal __dummy
+ nop
+
+__dummy:
+ /* Get the actual address that we are running at */
+ PTR_LA a7, __dummy
+ dsubu t3, ra, a7 /* t3 now has reloc offset */
+
+ PTR_LA t1, _start
+ daddu t0, t1, t3 /* t0 now has actual address of _start */
+
+ /* Calculate end address of copy loop */
+ PTR_LA t2, _end
+ daddiu t2, t2, 0x4000 /* Increase size to include appended DTB */
+ daddiu t2, t2, 127
+ ins t2, zero, 0, 7 /* Round up to cache line for memcpy */
+
+	/* Copy ourselves from flash into the L2 cache, 32 bytes at a time */
+1:
+ ld a0, 0(t0)
+ ld a1, 8(t0)
+ ld a2, 16(t0)
+ ld a3, 24(t0)
+ sd a0, 0(t1)
+ sd a1, 8(t1)
+ sd a2, 16(t1)
+ sd a3, 24(t1)
+ addiu t0, 32
+ addiu t1, 32
+ bne t1, t2, 1b
+ nop
+
+ sync
+
+ /*
+	 * Return to start.S, now running from TEXT_BASE, which points into
+	 * the DRAM address space but is effectively backed by the L2 cache
+	 * at this point. This greatly speeds up the init process, especially
+	 * the DDR init code.
+ */
+ dsubu s0, s0, t3 /* Fixup return address with reloc offset */
+ jr.hb s0 /* Jump back with hazard barrier */
+ nop
+
+ END(mips_mach_early_init)
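The cache-line rounding of the copy size above is the usual (x + 127) & ~127: for a hypothetical end address of 0x8123 it yields 0x8180, matching the 128-byte cache line size configured for this SoC.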
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 27295ef384..ff4f06ed79 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -603,6 +603,13 @@ config SMP
only one CPU will be enabled regardless of the number of CPUs
available.
+config SMP_AP_WORK
+ bool
+ depends on SMP
+ help
+ Allow APs to do other work after initialisation instead of going
+ to sleep.
+
config MAX_CPUS
int "Maximum number of CPUs permitted"
depends on SMP
diff --git a/arch/x86/cpu/Makefile b/arch/x86/cpu/Makefile
index ee0499f5d7..16e67e3da2 100644
--- a/arch/x86/cpu/Makefile
+++ b/arch/x86/cpu/Makefile
@@ -60,7 +60,7 @@ ifndef CONFIG_SYS_COREBOOT
obj-$(CONFIG_$(SPL_TPL_)X86_32BIT_INIT) += irq.o
endif
ifndef CONFIG_$(SPL_)X86_64
-obj-$(CONFIG_SMP) += mp_init.o
+obj-$(CONFIG_$(SPL_)SMP) += mp_init.o
endif
obj-y += mtrr.o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/x86/cpu/apollolake/Kconfig b/arch/x86/cpu/apollolake/Kconfig
index 942f11f566..99d4e105c2 100644
--- a/arch/x86/cpu/apollolake/Kconfig
+++ b/arch/x86/cpu/apollolake/Kconfig
@@ -15,6 +15,7 @@ config INTEL_APOLLOLAKE
select TPL_PCH_SUPPORT
select PCH_SUPPORT
select P2SB
+ select SMP_AP_WORK
imply ENABLE_MRC_CACHE
imply AHCI_PCI
imply SCSI
diff --git a/arch/x86/cpu/cpu.c b/arch/x86/cpu/cpu.c
index 98ed66e67d..69c14189d1 100644
--- a/arch/x86/cpu/cpu.c
+++ b/arch/x86/cpu/cpu.c
@@ -67,10 +67,13 @@ static const char *const x86_vendor_name[] = {
int __weak x86_cleanup_before_linux(void)
{
-#ifdef CONFIG_BOOTSTAGE_STASH
+ int ret;
+
+ ret = mp_park_aps();
+ if (ret)
+ return log_msg_ret("park", ret);
bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH_ADDR,
CONFIG_BOOTSTAGE_STASH_SIZE);
-#endif
return 0;
}
@@ -201,18 +204,19 @@ int last_stage_init(void)
write_tables();
-#ifdef CONFIG_GENERATE_ACPI_TABLE
- fadt = acpi_find_fadt();
+ if (IS_ENABLED(CONFIG_GENERATE_ACPI_TABLE)) {
+ fadt = acpi_find_fadt();
- /* Don't touch ACPI hardware on HW reduced platforms */
- if (fadt && !(fadt->flags & ACPI_FADT_HW_REDUCED_ACPI)) {
- /*
- * Other than waiting for OSPM to request us to switch to ACPI
- * mode, do it by ourselves, since SMI will not be triggered.
- */
- enter_acpi_mode(fadt->pm1a_cnt_blk);
+ /* Don't touch ACPI hardware on HW reduced platforms */
+ if (fadt && !(fadt->flags & ACPI_FADT_HW_REDUCED_ACPI)) {
+ /*
+ * Other than waiting for OSPM to request us to switch
+			 * to ACPI mode, do it by ourselves, since SMI will
+ * not be triggered.
+ */
+ enter_acpi_mode(fadt->pm1a_cnt_blk);
+ }
}
-#endif
return 0;
}
@@ -220,19 +224,20 @@ int last_stage_init(void)
static int x86_init_cpus(void)
{
-#ifdef CONFIG_SMP
- debug("Init additional CPUs\n");
- x86_mp_init();
-#else
- struct udevice *dev;
+ if (IS_ENABLED(CONFIG_SMP)) {
+ debug("Init additional CPUs\n");
+ x86_mp_init();
+ } else {
+ struct udevice *dev;
- /*
- * This causes the cpu-x86 driver to be probed.
- * We don't check return value here as we want to allow boards
- * which have not been converted to use cpu uclass driver to boot.
- */
- uclass_first_device(UCLASS_CPU, &dev);
-#endif
+ /*
+ * This causes the cpu-x86 driver to be probed.
+ * We don't check return value here as we want to allow boards
+ * which have not been converted to use cpu uclass driver to
+ * boot.
+ */
+ uclass_first_device(UCLASS_CPU, &dev);
+ }
return 0;
}
@@ -276,9 +281,8 @@ int reserve_arch(void)
if (IS_ENABLED(CONFIG_ENABLE_MRC_CACHE))
mrccache_reserve();
-#ifdef CONFIG_SEABIOS
- high_table_reserve();
-#endif
+ if (IS_ENABLED(CONFIG_SEABIOS))
+ high_table_reserve();
if (IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) {
acpi_s3_reserve();
diff --git a/arch/x86/cpu/i386/cpu.c b/arch/x86/cpu/i386/cpu.c
index a6a6afec8c..8f342dd06e 100644
--- a/arch/x86/cpu/i386/cpu.c
+++ b/arch/x86/cpu/i386/cpu.c
@@ -21,6 +21,7 @@
#include <common.h>
#include <cpu_func.h>
#include <init.h>
+#include <log.h>
#include <malloc.h>
#include <spl.h>
#include <asm/control_regs.h>
@@ -630,32 +631,15 @@ int cpu_jump_to_64bit_uboot(ulong target)
return -EFAULT;
}
-#ifdef CONFIG_SMP
-static int enable_smis(struct udevice *cpu, void *unused)
-{
- return 0;
-}
-
-static struct mp_flight_record mp_steps[] = {
- MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
- /* Wait for APs to finish initialization before proceeding */
- MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
-};
-
int x86_mp_init(void)
{
- struct mp_params mp_params;
-
- mp_params.parallel_microcode_load = 0,
- mp_params.flight_plan = &mp_steps[0];
- mp_params.num_records = ARRAY_SIZE(mp_steps);
- mp_params.microcode_pointer = 0;
+ int ret;
- if (mp_init(&mp_params)) {
+ ret = mp_init();
+ if (ret) {
printf("Warning: MP init failure\n");
- return -EIO;
+ return log_ret(ret);
}
return 0;
}
-#endif
diff --git a/arch/x86/cpu/mp_init.c b/arch/x86/cpu/mp_init.c
index 7fde4ff7e1..d2f1ee38cf 100644
--- a/arch/x86/cpu/mp_init.c
+++ b/arch/x86/cpu/mp_init.c
@@ -15,6 +15,7 @@
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/interrupt.h>
+#include <asm/io.h>
#include <asm/lapic.h>
#include <asm/microcode.h>
#include <asm/mp.h>
@@ -31,29 +32,132 @@
DECLARE_GLOBAL_DATA_PTR;
-/* Total CPUs include BSP */
-static int num_cpus;
+/*
+ * Setting up multiprocessing
+ *
+ * See https://www.intel.com/content/www/us/en/intelligent-systems/intel-boot-loader-development-kit/minimal-intel-architecture-boot-loader-paper.html
+ *
+ * Note that this file refers to the boot CPU (the one U-Boot is running on) as
+ * the BSP (BootStrap Processor) and the others as APs (Application Processors).
+ *
+ * This module works by loading some setup code into RAM at AP_DEFAULT_BASE and
+ * telling each AP to execute it. The code that each AP runs is in
+ * sipi_vector.S (see ap_start16) which includes a struct sipi_params at the
+ * end of it. Those parameters are set up by the C code.
+
+ *
+ * parameters (sipi_params) which tell the APs what to do. This block includes
+ * microcode and the MTRRs (Memory Type Range Registers) from the main CPU.
+ * There is also an ap_count which each AP increments as it starts up, so the
+ * BSP can tell how many checked in.
+ *
+ * The APs are started with a SIPI (Startup Inter-Processor Interrupt) which
+ * tells an AP to start executing at a particular address, in this case
+ * AP_DEFAULT_BASE which contains the code copied from ap_start16. This protocol
+ * is handled by start_aps().
+ *
+ * After being started, each AP runs the code in ap_start16, switches to 32-bit
+ * mode, runs the code at ap_start, then jumps to c_handler which is ap_init().
+ * This runs a very simple 'flight plan' described in mp_steps[]. This sets up
+ * the CPU and waits for further instructions by looking at its entry in
+ * ap_callbacks[]. Note that the flight plan is only actually run for each CPU
+ * in bsp_do_flight_plan(): once the BSP completes each flight record, it sets
+ * mp_flight_record->barrier to 1 to allow the APs to execute the record one
+ * by one.
+ *
+ * CPUs are numbered sequentially from 0 using the device tree:
+ *
+ * cpus {
+ * u-boot,dm-pre-reloc;
+ * #address-cells = <1>;
+ * #size-cells = <0>;
+ *
+ * cpu@0 {
+ * u-boot,dm-pre-reloc;
+ * device_type = "cpu";
+ * compatible = "intel,apl-cpu";
+ * reg = <0>;
+ * intel,apic-id = <0>;
+ * };
+ *
+ * cpu@1 {
+ * device_type = "cpu";
+ * compatible = "intel,apl-cpu";
+ * reg = <1>;
+ * intel,apic-id = <2>;
+ * };
+ *
+ * Here the 'reg' property is the CPU number; it is placed in dev->req_seq so
+ * that we can index into ap_callbacks[] with it. The APIC ID is the hardware
+ * ID of the CPU and may not be sequential (it typically is not when
+ * hyperthreading is supported, as in the example above).
+ *
+ * Once APs are inited they wait in ap_wait_for_instruction() for instructions.
+ * Instructions come in the form of a function to run. This logic is in
+ * mp_run_on_cpus() which supports running on any one AP, all APs, just the BSP
+ * or all CPUs. The BSP logic is handled directly in mp_run_on_cpus(), by
+ * calling the function. For the APs, callback information is stored in a
+ * single, common struct mp_callback and a pointer to this is written to each
+ * AP's slot in ap_callbacks[] by run_ap_work(). All APs get the message even
+ * if it is only for one of them. When an AP notices a message it checks whether
+ * it should call the function (see check in ap_wait_for_instruction()) and then
+ * does so if needed. After that it sets its slot to NULL to indicate it is
+ * done.
+ *
+ * While U-Boot is running it can use mp_run_on_cpus() to run code on the APs.
+ * An example of this is the 'mtrr' command which allows reading and changing
+ * the MTRRs on all CPUs.
+ *
+ * Before U-Boot exits it calls mp_park_aps() which tells all CPUs to halt by
+ * executing a 'hlt' instruction. That allows them to be used by Linux when it
+ * starts up.
+ */
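A minimal usage sketch of the mechanism described above (the callback and counter are made up for illustration; mp_run_on_cpus() and MP_SELECT_ALL come from this patch, while atomic_inc()/atomic_read() are assumed to come from <asm/atomic.h>, which is already included here):

static atomic_t cpus_seen;

/* Runs once on every selected CPU (BSP and all APs for MP_SELECT_ALL) */
static void count_cpu(void *arg)
{
	atomic_inc((atomic_t *)arg);
}

static int count_all_cpus(void)
{
	int ret;

	ret = mp_run_on_cpus(MP_SELECT_ALL, count_cpu, &cpus_seen);
	if (ret)
		return ret;

	return atomic_read(&cpus_seen);
}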
/* This also needs to match the sipi.S assembly code for saved MSR encoding */
-struct saved_msr {
+struct __packed saved_msr {
uint32_t index;
uint32_t lo;
uint32_t hi;
-} __packed;
-
+};
+/**
+ * struct mp_flight_plan - Holds the flight plan
+ *
+ * @num_records: Number of flight records
+ * @records: Pointer to each record
+ */
struct mp_flight_plan {
int num_records;
struct mp_flight_record *records;
};
+/**
+ * struct mp_callback - Callback information for APs
+ *
+ * @func: Function to run
+ * @arg: Argument to pass to the function
+ * @logical_cpu_number: Either a CPU number (i.e. dev->req_seq) or a special
+ * value like MP_SELECT_BSP. It tells the AP whether it should process this
+ * callback
+ */
+struct mp_callback {
+ mp_run_func func;
+ void *arg;
+ int logical_cpu_number;
+};
+
+/* Stores the flight plan so that APs can find it */
static struct mp_flight_plan mp_info;
-struct cpu_map {
- struct udevice *dev;
- int apic_id;
- int err_code;
-};
+/*
+ * ap_callbacks - Callback mailbox array
+ *
+ * Array of callbacks, one entry for each available CPU, indexed by the CPU
+ * number, which is dev->req_seq. The entry for the main CPU is never used.
+ * When this is NULL, there is no pending work for the CPU to run. When
+ * non-NULL it points to the mp_callback structure. This is shared between all
+ * CPUs, so should only be written by the main CPU.
+ */
+static struct mp_callback **ap_callbacks;
static inline void barrier_wait(atomic_t *b)
{
@@ -151,11 +255,12 @@ static void ap_init(unsigned int cpu_index)
debug("AP: slot %d apic_id %x, dev %s\n", cpu_index, apic_id,
dev ? dev->name : "(apic_id not found)");
- /* Walk the flight plan */
+ /*
+ * Walk the flight plan, which only returns if CONFIG_SMP_AP_WORK is not
+ * enabled
+ */
ap_do_flight_plan(dev);
- /* Park the AP */
- debug("parking\n");
done:
stop_this_cpu();
}
@@ -309,13 +414,26 @@ static int apic_wait_timeout(int total_delay, const char *msg)
return 0;
}
-static int start_aps(int ap_count, atomic_t *num_aps)
+/**
+ * start_aps() - Start up the APs and count how many we find
+ *
+ * This is called on the boot processor to start up all the other processors
+ * (here called APs).
+ *
+ * @num_aps: Number of APs we expect to find
+ * @ap_count: Initially zero. Incremented by this function for each AP found
+ * @return 0 if all APs were set up correctly or there are none to set up,
+ * -ENOSPC if the SIPI vector is too high in memory,
+ * -ETIMEDOUT if the ICR is busy or the second SIPI fails to complete
+ * -EIO if not all APs check in correctly
+ */
+static int start_aps(int num_aps, atomic_t *ap_count)
{
int sipi_vector;
/* Max location is 4KiB below 1MiB */
const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;
- if (ap_count == 0)
+ if (num_aps == 0)
return 0;
/* The vector is sent as a 4k aligned address in one byte */
@@ -327,7 +445,7 @@ static int start_aps(int ap_count, atomic_t *num_aps)
return -ENOSPC;
}
- debug("Attempting to start %d APs\n", ap_count);
+ debug("Attempting to start %d APs\n", num_aps);
if (apic_wait_timeout(1000, "ICR not to be busy"))
return -ETIMEDOUT;
@@ -350,7 +468,7 @@ static int start_aps(int ap_count, atomic_t *num_aps)
return -ETIMEDOUT;
/* Wait for CPUs to check in up to 200 us */
- wait_for_aps(num_aps, ap_count, 200, 15);
+ wait_for_aps(ap_count, num_aps, 200, 15);
/* Send 2nd SIPI */
if (apic_wait_timeout(1000, "ICR not to be busy"))
@@ -363,25 +481,35 @@ static int start_aps(int ap_count, atomic_t *num_aps)
return -ETIMEDOUT;
/* Wait for CPUs to check in */
- if (wait_for_aps(num_aps, ap_count, 10000, 50)) {
+ if (wait_for_aps(ap_count, num_aps, 10000, 50)) {
debug("Not all APs checked in: %d/%d\n",
- atomic_read(num_aps), ap_count);
+ atomic_read(ap_count), num_aps);
return -EIO;
}
return 0;
}
-static int bsp_do_flight_plan(struct udevice *cpu, struct mp_params *mp_params)
+/**
+ * bsp_do_flight_plan() - Do the flight plan on the BSP
+ *
+ * This runs the flight plan on the main CPU used to boot U-Boot
+ *
+ * @cpu: Device for the main CPU
+ * @plan: Flight plan to run
+ * @num_aps: Number of APs (CPUs other than the BSP)
+ * @return 0 on success, -ETIMEDOUT if an AP failed to come up
+ */
+static int bsp_do_flight_plan(struct udevice *cpu, struct mp_flight_plan *plan,
+ int num_aps)
{
int i;
int ret = 0;
const int timeout_us = 100000;
const int step_us = 100;
- int num_aps = num_cpus - 1;
- for (i = 0; i < mp_params->num_records; i++) {
- struct mp_flight_record *rec = &mp_params->flight_plan[i];
+ for (i = 0; i < plan->num_records; i++) {
+ struct mp_flight_record *rec = &plan->records[i];
/* Wait for APs if the record is not released */
if (atomic_read(&rec->barrier) == 0) {
@@ -398,12 +526,22 @@ static int bsp_do_flight_plan(struct udevice *cpu, struct mp_params *mp_params)
release_barrier(&rec->barrier);
}
+
return ret;
}
-static int init_bsp(struct udevice **devp)
+/**
+ * get_bsp() - Get information about the bootstrap processor
+ *
+ * @devp: If non-NULL, returns CPU device corresponding to the BSP
+ * @cpu_countp: If non-NULL, returns the total number of CPUs
+ * @return CPU number of the BSP, or -ve on error. If multiprocessing is not
+ * enabled, returns 0
+ */
+static int get_bsp(struct udevice **devp, int *cpu_countp)
{
char processor_name[CPU_MAX_NAME_LEN];
+ struct udevice *dev;
int apic_id;
int ret;
@@ -411,61 +549,333 @@ static int init_bsp(struct udevice **devp)
debug("CPU: %s\n", processor_name);
apic_id = lapicid();
- ret = find_cpu_by_apic_id(apic_id, devp);
- if (ret) {
+ ret = find_cpu_by_apic_id(apic_id, &dev);
+ if (ret < 0) {
printf("Cannot find boot CPU, APIC ID %d\n", apic_id);
return ret;
}
+ ret = cpu_get_count(dev);
+ if (ret < 0)
+ return log_msg_ret("count", ret);
+ if (devp)
+ *devp = dev;
+ if (cpu_countp)
+ *cpu_countp = ret;
+
+ return dev->req_seq >= 0 ? dev->req_seq : 0;
+}
+
+/**
+ * read_callback() - Read the pointer in a callback slot
+ *
+ * This is called by APs to read their callback slot to see if there is a
+ * pointer to new instructions
+ *
+ * @slot: Pointer to the AP's callback slot
+ * @return value of that pointer
+ */
+static struct mp_callback *read_callback(struct mp_callback **slot)
+{
+ dmb();
+
+ return *slot;
+}
+
+/**
+ * store_callback() - Store a pointer to the callback slot
+ *
+ * This is called by APs to write NULL into the callback slot when they have
+ * finished the work requested by the BSP.
+ *
+ * @slot: Pointer to the AP's callback slot
+ * @val: Value to write (e.g. NULL)
+ */
+static void store_callback(struct mp_callback **slot, struct mp_callback *val)
+{
+ *slot = val;
+ dmb();
+}
+
+/**
+ * run_ap_work() - Run a callback on selected APs
+ *
+ * This writes @callback to all APs and waits for them all to acknowledge it.
+ * Note that whether each AP actually calls the callback depends on the value
+ * of logical_cpu_number (see struct mp_callback). The logical CPU number is
+ * the CPU device's req_seq value.
+ *
+ * @callback: Callback information to pass to all APs
+ * @bsp: CPU device for the BSP
+ * @num_cpus: The number of CPUs in the system (= number of APs + 1)
+ * @expire_ms: Timeout to wait for all APs to finish, in milliseconds, or 0 for
+ * no timeout
+ * @return 0 if OK, -ETIMEDOUT if one or more APs failed to respond in time
+ */
+static int run_ap_work(struct mp_callback *callback, struct udevice *bsp,
+ int num_cpus, uint expire_ms)
+{
+ int cur_cpu = bsp->req_seq;
+ int num_aps = num_cpus - 1; /* number of non-BSPs to get this message */
+ int cpus_accepted;
+ ulong start;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_SMP_AP_WORK)) {
+ printf("APs already parked. CONFIG_SMP_AP_WORK not enabled\n");
+ return -ENOTSUPP;
+ }
+
+ /* Signal to all the APs to run the func. */
+ for (i = 0; i < num_cpus; i++) {
+ if (cur_cpu != i)
+ store_callback(&ap_callbacks[i], callback);
+ }
+ mfence();
+
+ /* Wait for all the APs to signal back that call has been accepted. */
+ start = get_timer(0);
+
+ do {
+ mdelay(1);
+ cpus_accepted = 0;
+
+ for (i = 0; i < num_cpus; i++) {
+ if (cur_cpu == i)
+ continue;
+ if (!read_callback(&ap_callbacks[i]))
+ cpus_accepted++;
+ }
+
+ if (expire_ms && get_timer(start) >= expire_ms) {
+ log(UCLASS_CPU, LOGL_CRIT,
+ "AP call expired; %d/%d CPUs accepted\n",
+ cpus_accepted, num_aps);
+ return -ETIMEDOUT;
+ }
+ } while (cpus_accepted != num_aps);
+
+ /* Make sure we can see any data written by the APs */
+ mfence();
return 0;
}
-int mp_init(struct mp_params *p)
+/**
+ * ap_wait_for_instruction() - Wait for and process requests from the main CPU
+ *
+ * This is called by APs (here, everything other than the main boot CPU) to
+ * await instructions. They arrive in the form of a function call and argument,
+ * which is then called. This uses a simple mailbox with atomic read/set
+ *
+ * @cpu: CPU that is waiting
+ * @unused: Optional argument provided by struct mp_flight_record, not used here
+ * @return 0 if CONFIG_SMP_AP_WORK is not enabled; otherwise does not return
+ */
+static int ap_wait_for_instruction(struct udevice *cpu, void *unused)
{
- int num_aps;
- atomic_t *ap_count;
- struct udevice *cpu;
+ struct mp_callback lcb;
+ struct mp_callback **per_cpu_slot;
+
+ if (!IS_ENABLED(CONFIG_SMP_AP_WORK))
+ return 0;
+
+ per_cpu_slot = &ap_callbacks[cpu->req_seq];
+
+ while (1) {
+ struct mp_callback *cb = read_callback(per_cpu_slot);
+
+ if (!cb) {
+ asm ("pause");
+ continue;
+ }
+
+ /* Copy to local variable before using the value */
+ memcpy(&lcb, cb, sizeof(lcb));
+ mfence();
+ if (lcb.logical_cpu_number == MP_SELECT_ALL ||
+ lcb.logical_cpu_number == MP_SELECT_APS ||
+ cpu->req_seq == lcb.logical_cpu_number)
+ lcb.func(lcb.arg);
+
+ /* Indicate we are finished */
+ store_callback(per_cpu_slot, NULL);
+ }
+
+ return 0;
+}
+
+static int mp_init_cpu(struct udevice *cpu, void *unused)
+{
+ struct cpu_platdata *plat = dev_get_parent_platdata(cpu);
+
+ plat->ucode_version = microcode_read_rev();
+ plat->device_id = gd->arch.x86_device;
+
+ return device_probe(cpu);
+}
+
+static struct mp_flight_record mp_steps[] = {
+ MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
+ MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL, NULL, NULL),
+};
+
+int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
+{
+ struct mp_callback lcb = {
+ .func = func,
+ .arg = arg,
+ .logical_cpu_number = cpu_select,
+ };
+ struct udevice *dev;
+ int num_cpus;
int ret;
- /* This will cause the CPUs devices to be bound */
- struct uclass *uc;
- ret = uclass_get(UCLASS_CPU, &uc);
+ ret = get_bsp(&dev, &num_cpus);
+ if (ret < 0)
+ return log_msg_ret("bsp", ret);
+ if (cpu_select == MP_SELECT_ALL || cpu_select == MP_SELECT_BSP ||
+ cpu_select == ret) {
+ /* Run on BSP first */
+ func(arg);
+ }
+
+ if (!IS_ENABLED(CONFIG_SMP_AP_WORK) ||
+ !(gd->flags & GD_FLG_SMP_READY)) {
+ /* Allow use of this function on the BSP only */
+ if (cpu_select == MP_SELECT_BSP || !cpu_select)
+ return 0;
+ return -ENOTSUPP;
+ }
+
+ /* Allow up to 1 second for all APs to finish */
+ ret = run_ap_work(&lcb, dev, num_cpus, 1000 /* ms */);
+ if (ret)
+ return log_msg_ret("aps", ret);
+
+ return 0;
+}
+
+static void park_this_cpu(void *unused)
+{
+ stop_this_cpu();
+}
+
+int mp_park_aps(void)
+{
+ int ret;
+
+ ret = mp_run_on_cpus(MP_SELECT_APS, park_this_cpu, NULL);
if (ret)
+ return log_ret(ret);
+
+ return 0;
+}
+
+int mp_first_cpu(int cpu_select)
+{
+ struct udevice *dev;
+ int num_cpus;
+ int ret;
+
+ /*
+ * This assumes that CPUs are numbered from 0. This function tries to
+ * avoid assuming the CPU 0 is the boot CPU
+ */
+ if (cpu_select == MP_SELECT_ALL)
+ return 0; /* start with the first one */
+
+ ret = get_bsp(&dev, &num_cpus);
+ if (ret < 0)
+ return log_msg_ret("bsp", ret);
+
+ /* Return boot CPU if requested */
+ if (cpu_select == MP_SELECT_BSP)
return ret;
+ /* Return something other than the boot CPU, if APs requested */
+ if (cpu_select == MP_SELECT_APS && num_cpus > 1)
+ return ret == 0 ? 1 : 0;
+
+ /* Try to check for an invalid value */
+ if (cpu_select < 0 || cpu_select >= num_cpus)
+ return -EINVAL;
+
+ return cpu_select; /* return the only selected one */
+}
+
+int mp_next_cpu(int cpu_select, int prev_cpu)
+{
+ struct udevice *dev;
+ int num_cpus;
+ int ret;
+ int bsp;
+
+ /* If we selected the BSP or a particular single CPU, we are done */
+ if (!IS_ENABLED(CONFIG_SMP_AP_WORK) || cpu_select == MP_SELECT_BSP ||
+ cpu_select >= 0)
+ return -EFBIG;
+
+ /* Must be doing MP_SELECT_ALL or MP_SELECT_APS; return the next CPU */
+ ret = get_bsp(&dev, &num_cpus);
+ if (ret < 0)
+ return log_msg_ret("bsp", ret);
+ bsp = ret;
+
+ /* Move to the next CPU */
+ assert(prev_cpu >= 0);
+ ret = prev_cpu + 1;
+
+ /* Skip the BSP if needed */
+ if (cpu_select == MP_SELECT_APS && ret == bsp)
+ ret++;
+ if (ret >= num_cpus)
+ return -EFBIG;
+
+ return ret;
+}
+
+int mp_init(void)
+{
+ int num_aps, num_cpus;
+ atomic_t *ap_count;
+ struct udevice *cpu;
+ struct uclass *uc;
+ int ret;
+
if (IS_ENABLED(CONFIG_QFW)) {
ret = qemu_cpu_fixup();
if (ret)
return ret;
}
- ret = init_bsp(&cpu);
- if (ret) {
+ /*
+ * Multiple APs are brought up simultaneously and they may get the same
+ * seq num in the uclass_resolve_seq() during device_probe(). To avoid
+ * this, set req_seq to the reg number in the device tree in advance.
+ */
+ uclass_id_foreach_dev(UCLASS_CPU, cpu, uc)
+ cpu->req_seq = dev_read_u32_default(cpu, "reg", -1);
+
+ ret = get_bsp(&cpu, &num_cpus);
+ if (ret < 0) {
debug("Cannot init boot CPU: err=%d\n", ret);
return ret;
}
- if (p == NULL || p->flight_plan == NULL || p->num_records < 1) {
- printf("Invalid MP parameters\n");
- return -EINVAL;
- }
-
- num_cpus = cpu_get_count(cpu);
- if (num_cpus < 0) {
- debug("Cannot get number of CPUs: err=%d\n", num_cpus);
- return num_cpus;
- }
-
if (num_cpus < 2)
debug("Warning: Only 1 CPU is detected\n");
ret = check_cpu_devices(num_cpus);
if (ret)
- debug("Warning: Device tree does not describe all CPUs. Extra ones will not be started correctly\n");
+ log_warning("Warning: Device tree does not describe all CPUs. Extra ones will not be started correctly\n");
+
+ ap_callbacks = calloc(num_cpus, sizeof(struct mp_callback *));
+ if (!ap_callbacks)
+ return -ENOMEM;
/* Copy needed parameters so that APs have a reference to the plan */
- mp_info.num_records = p->num_records;
- mp_info.records = p->flight_plan;
+ mp_info.num_records = ARRAY_SIZE(mp_steps);
+ mp_info.records = mp_steps;
/* Load the SIPI vector */
ret = load_sipi_vector(&ap_count, num_cpus);
@@ -489,28 +899,12 @@ int mp_init(struct mp_params *p)
}
/* Walk the flight plan for the BSP */
- ret = bsp_do_flight_plan(cpu, p);
+ ret = bsp_do_flight_plan(cpu, &mp_info, num_aps);
if (ret) {
debug("CPU init failed: err=%d\n", ret);
return ret;
}
+ gd->flags |= GD_FLG_SMP_READY;
return 0;
}
-
-int mp_init_cpu(struct udevice *cpu, void *unused)
-{
- struct cpu_platdata *plat = dev_get_parent_platdata(cpu);
-
- /*
- * Multiple APs are brought up simultaneously and they may get the same
- * seq num in the uclass_resolve_seq() during device_probe(). To avoid
- * this, set req_seq to the reg number in the device tree in advance.
- */
- cpu->req_seq = fdtdec_get_int(gd->fdt_blob, dev_of_offset(cpu), "reg",
- -1);
- plat->ucode_version = microcode_read_rev();
- plat->device_id = gd->arch.x86_device;
-
- return device_probe(cpu);
-}
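A minimal usage sketch (not part of the patch) of the mp_run_on_cpus() interface added above: the function, the MP_SELECT_* selectors and the mp_run_func signature come from this series, while the callback and its counter are purely illustrative (and deliberately trivial, not SMP-safe).

#include <asm/mp.h>

/* Illustrative callback matching mp_run_func; runs once on each selected CPU */
static void bump_counter(void *arg)
{
        int *counter = arg;

        /* Demo only: a real callback touching shared data would need atomics */
        (*counter)++;
}

static int example_run_on_all_cpus(void)
{
        int hits = 0;
        int ret;

        /* Runs on the BSP first, then on the APs parked in ap_wait_for_instruction() */
        ret = mp_run_on_cpus(MP_SELECT_ALL, bump_counter, &hits);
        if (ret)
                return ret;

        return 0;
}

Running on the APs only works once mp_init() has set GD_FLG_SMP_READY and CONFIG_SMP_AP_WORK is enabled; otherwise only the BSP portion of the call succeeds.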
diff --git a/arch/x86/cpu/mtrr.c b/arch/x86/cpu/mtrr.c
index 7ec0733337..2468d88a80 100644
--- a/arch/x86/cpu/mtrr.c
+++ b/arch/x86/cpu/mtrr.c
@@ -21,6 +21,7 @@
#include <log.h>
#include <asm/cache.h>
#include <asm/io.h>
+#include <asm/mp.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
@@ -63,10 +64,71 @@ static void set_var_mtrr(uint reg, uint type, uint64_t start, uint64_t size)
wrmsrl(MTRR_PHYS_MASK_MSR(reg), mask | MTRR_PHYS_MASK_VALID);
}
+void mtrr_read_all(struct mtrr_info *info)
+{
+ int i;
+
+ for (i = 0; i < MTRR_COUNT; i++) {
+ info->mtrr[i].base = native_read_msr(MTRR_PHYS_BASE_MSR(i));
+ info->mtrr[i].mask = native_read_msr(MTRR_PHYS_MASK_MSR(i));
+ }
+}
+
+void mtrr_write_all(struct mtrr_info *info)
+{
+ struct mtrr_state state;
+ int i;
+
+ for (i = 0; i < MTRR_COUNT; i++) {
+ mtrr_open(&state, true);
+ wrmsrl(MTRR_PHYS_BASE_MSR(i), info->mtrr[i].base);
+ wrmsrl(MTRR_PHYS_MASK_MSR(i), info->mtrr[i].mask);
+ mtrr_close(&state, true);
+ }
+}
+
+static void write_mtrrs(void *arg)
+{
+ struct mtrr_info *info = arg;
+
+ mtrr_write_all(info);
+}
+
+static void read_mtrrs(void *arg)
+{
+ struct mtrr_info *info = arg;
+
+ mtrr_read_all(info);
+}
+
+/**
+ * mtrr_copy_to_aps() - Copy the MTRRs from the boot CPU to other CPUs
+ *
+ * @return 0 on success, -ve on failure
+ */
+static int mtrr_copy_to_aps(void)
+{
+ struct mtrr_info info;
+ int ret;
+
+ ret = mp_run_on_cpus(MP_SELECT_BSP, read_mtrrs, &info);
+ if (ret == -ENXIO)
+ return 0;
+ else if (ret)
+ return log_msg_ret("bsp", ret);
+
+ ret = mp_run_on_cpus(MP_SELECT_APS, write_mtrrs, &info);
+ if (ret)
+ return log_msg_ret("bsp", ret);
+
+ return 0;
+}
+
int mtrr_commit(bool do_caches)
{
struct mtrr_request *req = gd->arch.mtrr_req;
struct mtrr_state state;
+ int ret;
int i;
debug("%s: enabled=%d, count=%d\n", __func__, gd->arch.has_mtrr,
@@ -88,6 +150,12 @@ int mtrr_commit(bool do_caches)
mtrr_close(&state, do_caches);
debug("mtrr done\n");
+ if (gd->flags & GD_FLG_RELOC) {
+ ret = mtrr_copy_to_aps();
+ if (ret)
+ return log_msg_ret("copy", ret);
+ }
+
return 0;
}
@@ -153,3 +221,84 @@ int mtrr_set_next_var(uint type, uint64_t start, uint64_t size)
return 0;
}
+
+/** enum mtrr_opcode - supported operations for mtrr_do_oper() */
+enum mtrr_opcode {
+ MTRR_OP_SET,
+ MTRR_OP_SET_VALID,
+};
+
+/**
+ * struct mtrr_oper - An MTRR operation to perform on a CPU
+ *
+ * @opcode: Indicates operation to perform
+ * @reg: MTRR reg number to select (0-7, -1 = all)
+ * @valid: Valid value to write for MTRR_OP_SET_VALID
+ * @base: Base value to write for MTRR_OP_SET
+ * @mask: Mask value to write for MTRR_OP_SET
+ */
+struct mtrr_oper {
+ enum mtrr_opcode opcode;
+ int reg;
+ bool valid;
+ u64 base;
+ u64 mask;
+};
+
+static void mtrr_do_oper(void *arg)
+{
+ struct mtrr_oper *oper = arg;
+ u64 mask;
+
+ switch (oper->opcode) {
+ case MTRR_OP_SET_VALID:
+ mask = native_read_msr(MTRR_PHYS_MASK_MSR(oper->reg));
+ if (oper->valid)
+ mask |= MTRR_PHYS_MASK_VALID;
+ else
+ mask &= ~MTRR_PHYS_MASK_VALID;
+ wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), mask);
+ break;
+ case MTRR_OP_SET:
+ wrmsrl(MTRR_PHYS_BASE_MSR(oper->reg), oper->base);
+ wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), oper->mask);
+ break;
+ }
+}
+
+static int mtrr_start_op(int cpu_select, struct mtrr_oper *oper)
+{
+ struct mtrr_state state;
+ int ret;
+
+ mtrr_open(&state, true);
+ ret = mp_run_on_cpus(cpu_select, mtrr_do_oper, oper);
+ mtrr_close(&state, true);
+ if (ret)
+ return log_msg_ret("run", ret);
+
+ return 0;
+}
+
+int mtrr_set_valid(int cpu_select, int reg, bool valid)
+{
+ struct mtrr_oper oper;
+
+ oper.opcode = MTRR_OP_SET_VALID;
+ oper.reg = reg;
+ oper.valid = valid;
+
+ return mtrr_start_op(cpu_select, &oper);
+}
+
+int mtrr_set(int cpu_select, int reg, u64 base, u64 mask)
+{
+ struct mtrr_oper oper;
+
+ oper.opcode = MTRR_OP_SET;
+ oper.reg = reg;
+ oper.base = base;
+ oper.mask = mask;
+
+ return mtrr_start_op(cpu_select, &oper);
+}
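To illustrate the base/mask encoding that the new mtrr_set() expects (it mirrors what cmd/x86/mtrr.c computes below), here is a hedged sketch that marks a hypothetical 16 MiB MMIO window as write-combining on every CPU; the address, the register number and the MTRR_TYPE_WRCOMB macro name are assumptions made for the example.

#include <asm/mp.h>
#include <asm/mtrr.h>

/* Sketch only: program one variable MTRR on all CPUs */
static int example_map_write_combining(int reg)
{
        u64 start = 0xd0000000ULL;      /* hypothetical MMIO base */
        u64 size = 16 << 20;            /* region size, must be a power of two */
        u64 base, mask;

        /* The low bits of the base MSR hold the memory type */
        base = start | MTRR_TYPE_WRCOMB;
        /* The mask covers the region size, limited to the CPU's address bits */
        mask = ~(size - 1) & ((1ULL << CONFIG_CPU_ADDR_BITS) - 1);
        mask |= MTRR_PHYS_MASK_VALID;

        return mtrr_set(MP_SELECT_ALL, reg, base, mask);
}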
diff --git a/arch/x86/include/asm/mp.h b/arch/x86/include/asm/mp.h
index 9dddf88b5a..5f9b8c6564 100644
--- a/arch/x86/include/asm/mp.h
+++ b/arch/x86/include/asm/mp.h
@@ -11,6 +11,17 @@
#include <asm/atomic.h>
#include <asm/cache.h>
+enum {
+ /* Indicates that the function should run on all CPUs */
+ MP_SELECT_ALL = -1,
+
+ /* Run on the boot CPU (BSP) only */
+ MP_SELECT_BSP = -2,
+
+ /* Run on non-boot CPUs */
+ MP_SELECT_APS = -3,
+};
+
typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);
/*
@@ -25,6 +36,14 @@ typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);
*
* Note that ap_call() and bsp_call() can be NULL. In the NULL case the
* callback will just not be called.
+ *
+ * @barrier: Ensures that the BSP and AP don't run the flight record at the same
+ * time
+ * @cpus_entered: Counts the number of APs that have run this record
+ * @ap_call: Function for the APs to call
+ * @ap_arg: Argument to pass to @ap_call
+ * @bsp_call: Function for the BSP to call
+ * @bsp_arg: Argument to pass to @bsp_call
*/
struct mp_flight_record {
atomic_t barrier;
@@ -52,21 +71,6 @@ struct mp_flight_record {
MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)
/*
- * The mp_params structure provides the arguments to the mp subsystem
- * for bringing up APs.
- *
- * At present this is overkill for U-Boot, but it may make it easier to add
- * SMM support.
- */
-struct mp_params {
- int parallel_microcode_load;
- const void *microcode_pointer;
- /* Flight plan for APs and BSP */
- struct mp_flight_record *flight_plan;
- int num_records;
-};
-
-/*
* mp_init() will set up the SIPI vector and bring up the APs according to
* mp_params. Each flight record will be executed according to the plan. Note
* that the MP infrastructure uses SMM default area without saving it. It's
@@ -85,12 +89,105 @@ struct mp_params {
*
* mp_init() returns < 0 on error, 0 on success.
*/
-int mp_init(struct mp_params *params);
+int mp_init(void);
-/* Probes the CPU device */
-int mp_init_cpu(struct udevice *cpu, void *unused);
-
-/* Set up additional CPUs */
+/**
+ * x86_mp_init() - Set up additional CPUs
+ *
+ * @returns < 0 on error, 0 on success.
+ */
int x86_mp_init(void);
+/**
+ * mp_run_func() - Function to call on the AP
+ *
+ * @arg: Argument to pass
+ */
+typedef void (*mp_run_func)(void *arg);
+
+#if CONFIG_IS_ENABLED(SMP) && !CONFIG_IS_ENABLED(X86_64)
+/**
+ * mp_run_on_cpus() - Run a function on one or all CPUs
+ *
+ * This does not return until all CPUs have completed the work
+ *
+ * Running on anything other than the boot CPU is only supported if
+ * CONFIG_SMP_AP_WORK is enabled
+ *
+ * @cpu_select: CPU to run on (its dev->req_seq value), or MP_SELECT_ALL for
+ * all, MP_SELECT_BSP for the BSP, or MP_SELECT_APS for all the APs
+ * @func: Function to run
+ * @arg: Argument to pass to the function
+ * @return 0 on success, -ve on error
+ */
+int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg);
+
+/**
+ * mp_park_aps() - Park the APs ready for the OS
+ *
+ * This halts all CPUs except the main one, ready for the OS to use them
+ *
+ * @return 0 if OK, -ve on error
+ */
+int mp_park_aps(void);
+
+/**
+ * mp_first_cpu() - Get the first CPU to process, from a selection
+ *
+ * This is used to iterate through selected CPUs. Call this function first, then
+ * call mp_next_cpu() repeatedly (with the same @cpu_select) until it returns
+ * -EFBIG.
+ *
+ * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
+ * @return next CPU number to run on (e.g. 0)
+ */
+int mp_first_cpu(int cpu_select);
+
+/**
+ * mp_next_cpu() - Get the next CPU to process, from a selection
+ *
+ * This is used to iterate through selected CPUs. After first calling
+ * mp_first_cpu() once, call this function repeatedly until it returns -EFBIG.
+ *
+ * The value of @cpu_select must be the same for all calls and must match the
+ * value passed to mp_first_cpu(), otherwise the behaviour is undefined.
+ *
+ * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
+ * @prev_cpu: Previous value returned by mp_first_cpu()/mp_next_cpu()
+ * @return next CPU number to run on, or -EFBIG if there are no more
+ */
+int mp_next_cpu(int cpu_select, int prev_cpu);
+#else
+static inline int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
+{
+ /* There is only one CPU, so just call the function here */
+ func(arg);
+
+ return 0;
+}
+
+static inline int mp_park_aps(void)
+{
+ /* No APs to park */
+
+ return 0;
+}
+
+static inline int mp_first_cpu(int cpu_select)
+{
+ /* We cannot run on any APs, nor a selected CPU */
+ return cpu_select == MP_SELECT_APS ? -EFBIG : MP_SELECT_BSP;
+}
+
+static inline int mp_next_cpu(int cpu_select, int prev_cpu)
+{
+ /*
+ * When MP is not enabled, there is only one CPU and we did it in
+ * mp_first_cpu()
+ */
+ return -EFBIG;
+}
+
+#endif
+
#endif /* _X86_MP_H_ */
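The iteration idiom that mp_first_cpu()/mp_next_cpu() describe is easiest to see in a short sketch (the updated mtrr command further down uses the same pattern); the loop body here is just a placeholder.

#include <common.h>
#include <asm/mp.h>

/* Sketch only: walk every CPU matched by cpu_select */
static void example_walk_cpus(int cpu_select)
{
        int i;

        for (i = mp_first_cpu(cpu_select); i >= 0;
             i = mp_next_cpu(cpu_select, i)) {
                /* i is the CPU's req_seq number; -EFBIG ends the loop */
                printf("visiting CPU %d\n", i);
        }
}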
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 212a699c1b..48db1dd82f 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -71,6 +71,26 @@ struct mtrr_state {
};
/**
+ * struct mtrr - Information about a single MTRR
+ *
+ * @base: Base address and MTRR_BASE_TYPE_MASK
+ * @mask: Mask and MTRR_PHYS_MASK_VALID
+ */
+struct mtrr {
+ u64 base;
+ u64 mask;
+};
+
+/**
+ * struct mtrr_info - Information about all MTRRs
+ *
+ * @mtrr: Information about each mtrr
+ */
+struct mtrr_info {
+ struct mtrr mtrr[MTRR_COUNT];
+};
+
+/**
* mtrr_open() - Prepare to adjust MTRRs
*
* Use mtrr_open() passing in a structure - this function will init it. Then
@@ -129,6 +149,37 @@ int mtrr_commit(bool do_caches);
*/
int mtrr_set_next_var(uint type, uint64_t base, uint64_t size);
+/**
+ * mtrr_read_all() - Save all the MTRRs
+ *
+ * This reads all MTRRs from the boot CPU into a struct so they can be loaded
+ * onto other CPUs
+ *
+ * @info: Place to put the MTRR info
+ */
+void mtrr_read_all(struct mtrr_info *info);
+
+/**
+ * mtrr_set_valid() - Set the valid flag for a selected MTRR and CPU(s)
+ *
+ * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
+ * @reg: MTRR register to write (0-7)
+ * @valid: Valid flag to write
+ * @return 0 on success, -ve on error
+ */
+int mtrr_set_valid(int cpu_select, int reg, bool valid);
+
+/**
+ * mtrr_set() - Set the base address and mask for a selected MTRR and CPU(s)
+ *
+ * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
+ * @reg: MTRR register to write (0-7)
+ * @base: Base address and MTRR_BASE_TYPE_MASK
+ * @mask: Mask and MTRR_PHYS_MASK_VALID
+ * @return 0 on success, -ve on error
+ */
+int mtrr_set(int cpu_select, int reg, u64 base, u64 mask);
+
#endif
#if ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE - 1)) != 0)
diff --git a/board/Marvell/octeon_ebb7304/Kconfig b/board/Marvell/octeon_ebb7304/Kconfig
new file mode 100644
index 0000000000..ab54e6dbbc
--- /dev/null
+++ b/board/Marvell/octeon_ebb7304/Kconfig
@@ -0,0 +1,19 @@
+if TARGET_OCTEON_EBB7304
+
+config SYS_BOARD
+ string
+ default "octeon_ebb7304"
+
+config SYS_VENDOR
+ string
+ default "Marvell"
+
+config SYS_CONFIG_NAME
+ string
+ default "octeon_ebb7304"
+
+config DEFAULT_DEVICE_TREE
+ string
+ default "mrvl,octeon-ebb7304"
+
+endif
diff --git a/board/Marvell/octeon_ebb7304/MAINTAINERS b/board/Marvell/octeon_ebb7304/MAINTAINERS
new file mode 100644
index 0000000000..7256f83567
--- /dev/null
+++ b/board/Marvell/octeon_ebb7304/MAINTAINERS
@@ -0,0 +1,7 @@
+OCTEON_EBB7304 BOARD
+M: Aaron Williams <awilliams@marvell.com>
+S: Maintained
+F: board/Marvell/octeon_ebb7304/*
+F: configs/octeon_ebb7304_defconfig
+F: include/configs/octeon_ebb7304.h
+F: arch/mips/dts/mrvl,octeon-ebb7304.dts
diff --git a/board/Marvell/octeon_ebb7304/Makefile b/board/Marvell/octeon_ebb7304/Makefile
new file mode 100644
index 0000000000..a3ed0c8873
--- /dev/null
+++ b/board/Marvell/octeon_ebb7304/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) 2020 Stefan Roese <sr@denx.de>
+# Copyright (C) 2019-2020 Marvell International Ltd.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y := board.o
diff --git a/board/Marvell/octeon_ebb7304/board.c b/board/Marvell/octeon_ebb7304/board.c
new file mode 100644
index 0000000000..56e50a9063
--- /dev/null
+++ b/board/Marvell/octeon_ebb7304/board.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Stefan Roese <sr@denx.de>
+ */
+
+/*
+ * Nothing included right now. Code will be added in follow-up
+ * patches.
+ */
diff --git a/cmd/x86/mtrr.c b/cmd/x86/mtrr.c
index 084d7315f4..e118bba5a2 100644
--- a/cmd/x86/mtrr.c
+++ b/cmd/x86/mtrr.c
@@ -5,7 +5,9 @@
#include <common.h>
#include <command.h>
+#include <log.h>
#include <asm/msr.h>
+#include <asm/mp.h>
#include <asm/mtrr.h>
static const char *const mtrr_type_name[MTRR_TYPE_COUNT] = {
@@ -18,19 +20,32 @@ static const char *const mtrr_type_name[MTRR_TYPE_COUNT] = {
"Back",
};
-static int do_mtrr_list(void)
+static void read_mtrrs(void *arg)
{
+ struct mtrr_info *info = arg;
+
+ mtrr_read_all(info);
+}
+
+static int do_mtrr_list(int cpu_select)
+{
+ struct mtrr_info info;
+ int ret;
int i;
printf("Reg Valid Write-type %-16s %-16s %-16s\n", "Base ||",
"Mask ||", "Size ||");
+ memset(&info, '\0', sizeof(info));
+ ret = mp_run_on_cpus(cpu_select, read_mtrrs, &info);
+ if (ret)
+ return log_msg_ret("run", ret);
for (i = 0; i < MTRR_COUNT; i++) {
const char *type = "Invalid";
uint64_t base, mask, size;
bool valid;
- base = native_read_msr(MTRR_PHYS_BASE_MSR(i));
- mask = native_read_msr(MTRR_PHYS_MASK_MSR(i));
+ base = info.mtrr[i].base;
+ mask = info.mtrr[i].mask;
size = ~mask & ((1ULL << CONFIG_CPU_ADDR_BITS) - 1);
size |= (1 << 12) - 1;
size += 1;
@@ -44,14 +59,14 @@ static int do_mtrr_list(void)
return 0;
}
-static int do_mtrr_set(uint reg, int argc, char *const argv[])
+static int do_mtrr_set(int cpu_select, uint reg, int argc, char *const argv[])
{
const char *typename = argv[0];
- struct mtrr_state state;
uint32_t start, size;
uint64_t base, mask;
int i, type = -1;
bool valid;
+ int ret;
if (argc < 3)
return CMD_RET_USAGE;
@@ -73,27 +88,9 @@ static int do_mtrr_set(uint reg, int argc, char *const argv[])
if (valid)
mask |= MTRR_PHYS_MASK_VALID;
- mtrr_open(&state, true);
- wrmsrl(MTRR_PHYS_BASE_MSR(reg), base);
- wrmsrl(MTRR_PHYS_MASK_MSR(reg), mask);
- mtrr_close(&state, true);
-
- return 0;
-}
-
-static int mtrr_set_valid(int reg, bool valid)
-{
- struct mtrr_state state;
- uint64_t mask;
-
- mtrr_open(&state, true);
- mask = native_read_msr(MTRR_PHYS_MASK_MSR(reg));
- if (valid)
- mask |= MTRR_PHYS_MASK_VALID;
- else
- mask &= ~MTRR_PHYS_MASK_VALID;
- wrmsrl(MTRR_PHYS_MASK_MSR(reg), mask);
- mtrr_close(&state, true);
+ ret = mtrr_set(cpu_select, reg, base, mask);
+ if (ret)
+ return CMD_RET_FAILURE;
return 0;
}
@@ -101,39 +98,92 @@ static int mtrr_set_valid(int reg, bool valid)
static int do_mtrr(struct cmd_tbl *cmdtp, int flag, int argc,
char *const argv[])
{
- const char *cmd;
+ int cmd;
+ int cpu_select;
uint reg;
-
- cmd = argv[1];
- if (argc < 2 || *cmd == 'l')
- return do_mtrr_list();
- argc -= 2;
- argv += 2;
- if (argc <= 0)
- return CMD_RET_USAGE;
- reg = simple_strtoul(argv[0], NULL, 16);
- if (reg >= MTRR_COUNT) {
- printf("Invalid register number\n");
- return CMD_RET_USAGE;
+ int ret;
+
+ cpu_select = MP_SELECT_BSP;
+ if (argc >= 3 && !strcmp("-c", argv[1])) {
+ const char *cpustr;
+
+ cpustr = argv[2];
+ if (*cpustr == 'a')
+ cpu_select = MP_SELECT_ALL;
+ else
+ cpu_select = simple_strtol(cpustr, NULL, 16);
+ argc -= 2;
+ argv += 2;
+ }
+ argc--;
+ argv++;
+ cmd = argv[0] ? *argv[0] : 0;
+ if (argc < 1 || !cmd) {
+ cmd = 'l';
+ reg = 0;
+ } else {
+ if (argc < 2)
+ return CMD_RET_USAGE;
+ reg = simple_strtoul(argv[1], NULL, 16);
+ if (reg >= MTRR_COUNT) {
+ printf("Invalid register number\n");
+ return CMD_RET_USAGE;
+ }
+ }
+ if (cmd == 'l') {
+ bool first;
+ int i;
+
+ i = mp_first_cpu(cpu_select);
+ if (i < 0) {
+ printf("Invalid CPU (err=%d)\n", i);
+ return CMD_RET_FAILURE;
+ }
+ first = true;
+ for (; i >= 0; i = mp_next_cpu(cpu_select, i)) {
+ if (!first)
+ printf("\n");
+ printf("CPU %d:\n", i);
+ ret = do_mtrr_list(i);
+ if (ret) {
+ printf("Failed to read CPU %d (err=%d)\n", i,
+ ret);
+ return CMD_RET_FAILURE;
+ }
+ first = false;
+ }
+ } else {
+ switch (cmd) {
+ case 'e':
+ ret = mtrr_set_valid(cpu_select, reg, true);
+ break;
+ case 'd':
+ ret = mtrr_set_valid(cpu_select, reg, false);
+ break;
+ case 's':
+ ret = do_mtrr_set(cpu_select, reg, argc - 2, argv + 2);
+ break;
+ default:
+ return CMD_RET_USAGE;
+ }
+ if (ret) {
+ printf("Operation failed (err=%d)\n", ret);
+ return CMD_RET_FAILURE;
+ }
}
- if (*cmd == 'e')
- return mtrr_set_valid(reg, true);
- else if (*cmd == 'd')
- return mtrr_set_valid(reg, false);
- else if (*cmd == 's')
- return do_mtrr_set(reg, argc - 1, argv + 1);
- else
- return CMD_RET_USAGE;
return 0;
}
U_BOOT_CMD(
- mtrr, 6, 1, do_mtrr,
+ mtrr, 8, 1, do_mtrr,
"Use x86 memory type range registers (32-bit only)",
"[list] - list current registers\n"
"set <reg> <type> <start> <size> - set a register\n"
"\t<type> is Uncacheable, Combine, Through, Protect, Back\n"
"disable <reg> - disable a register\n"
- "ensable <reg> - enable a register"
+ "enable <reg> - enable a register\n"
+ "\n"
+ "Precede command with '-c <n>|all' to access a particular hex CPU, e.g.\n"
+ " mtrr -c all list; mtrr -c 2e list"
);
diff --git a/configs/octeon_ebb7304_defconfig b/configs/octeon_ebb7304_defconfig
new file mode 100644
index 0000000000..d810b1e45f
--- /dev/null
+++ b/configs/octeon_ebb7304_defconfig
@@ -0,0 +1,38 @@
+CONFIG_MIPS=y
+CONFIG_SYS_TEXT_BASE=0xffffffff80000000
+CONFIG_SYS_MALLOC_F_LEN=0x4000
+CONFIG_ENV_SIZE=0x2000
+CONFIG_ENV_SECT_SIZE=0x10000
+CONFIG_NR_DRAM_BANKS=2
+CONFIG_DEBUG_UART_BASE=0x8001180000000800
+CONFIG_DEBUG_UART_CLOCK=1200000000
+CONFIG_ARCH_OCTEON=y
+# CONFIG_MIPS_CACHE_SETUP is not set
+# CONFIG_MIPS_CACHE_DISABLE is not set
+CONFIG_DEBUG_UART=y
+CONFIG_SYS_CONSOLE_INFO_QUIET=y
+CONFIG_HUSH_PARSER=y
+CONFIG_CMD_MTD=y
+CONFIG_CMD_PCI=y
+CONFIG_CMD_DHCP=y
+CONFIG_CMD_PING=y
+CONFIG_CMD_TIME=y
+CONFIG_ENV_IS_IN_FLASH=y
+CONFIG_ENV_ADDR=0x1FBFE000
+CONFIG_CLK=y
+# CONFIG_INPUT is not set
+CONFIG_MTD=y
+CONFIG_DM_MTD=y
+CONFIG_MTD_NOR_FLASH=y
+CONFIG_FLASH_CFI_DRIVER=y
+CONFIG_CFI_FLASH=y
+CONFIG_SYS_FLASH_USE_BUFFER_WRITE=y
+CONFIG_FLASH_CFI_MTD=y
+CONFIG_SYS_FLASH_CFI=y
+# CONFIG_NETDEVICES is not set
+CONFIG_DEBUG_UART_SHIFT=3
+CONFIG_DEBUG_UART_ANNOUNCE=y
+CONFIG_SYS_NS16550=y
+CONFIG_SYSRESET=y
+CONFIG_SYSRESET_OCTEON=y
+CONFIG_HEXDUMP=y
diff --git a/doc/board/google/chromebook_coral.rst b/doc/board/google/chromebook_coral.rst
index 40bd9397d4..c39f1e310c 100644
--- a/doc/board/google/chromebook_coral.rst
+++ b/doc/board/google/chromebook_coral.rst
@@ -188,6 +188,7 @@ Partial memory map
fef00000 1000 CONFIG_BOOTSTAGE_STASH_ADDR
fef00000 Base of CAR region
+ 30000 AP_DEFAULT_BASE (used to start up additional CPUs)
f0000 CONFIG_ROM_TABLE_ADDR
120000 BSS (defined in u-boot-spl.lds)
200000 FSP-S (which is run after U-Boot is relocated)
diff --git a/drivers/core/acpi.c b/drivers/core/acpi.c
index cdbc2c5cf5..7fe93992b5 100644
--- a/drivers/core/acpi.c
+++ b/drivers/core/acpi.c
@@ -195,7 +195,7 @@ static int sort_acpi_item_type(struct acpi_ctx *ctx, void *start,
"u-boot,acpi-dsdt-order" :
"u-boot,acpi-ssdt-order", &size);
if (!order) {
- log_warning("Failed to find ordering, leaving as is\n");
+ log_debug("Failed to find ordering, leaving as is\n");
return 0;
}
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 04de51cb46..6bacf14aaf 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -22,6 +22,16 @@
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+#define MICRON_CFG_CR BIT(0)
+
+/*
+ * As per datasheet, die selection is done by the 6th bit of Die
+ * Select Register (Address 0xD0).
+ */
+#define MICRON_DIE_SELECT_REG 0xD0
+
+#define MICRON_SELECT_DIE(x) ((x) << 6)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -38,38 +48,52 @@ static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
- region->offset = 64;
- region->length = 64;
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
return 0;
}
-static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
/* Reserve 2 bytes for the BBM. */
region->offset = 2;
- region->length = 62;
+ region->length = (mtd->oobsize / 2) - 2;
return 0;
}
-static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
- .ecc = mt29f2g01abagd_ooblayout_ecc,
- .rfree = mt29f2g01abagd_ooblayout_free,
+static const struct mtd_ooblayout_ops micron_8_ooblayout = {
+ .ecc = micron_8_ooblayout_ecc,
+ .rfree = micron_8_ooblayout_free,
};
-static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int micron_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ spinand->scratchbuf);
+
+ if (target > 1)
+ return -EINVAL;
+
+ *spinand->scratchbuf = MICRON_SELECT_DIE(target);
+
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int micron_8_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
switch (status & MICRON_STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -95,6 +119,7 @@ static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info micron_spinand_table[] = {
+ /* M79A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01ABAGD", 0x24,
NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
NAND_ECCREQ(8, 512),
@@ -102,8 +127,91 @@ static const struct spinand_info micron_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
- mt29f2g01abagd_ecc_get_status)),
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 2Gb 1.8V */
+ SPINAND_INFO("MT29F2G01ABBGD", 0x25,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 3.3V */
+ SPINAND_INFO("MT29F1G01ABAFD", 0x14,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 1.8V */
+ SPINAND_INFO("MT29F1G01ABAFD", 0x15,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ADAGD", 0x36,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ABAFD", 0x34,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 4Gb 1.8V */
+ SPINAND_INFO("MT29F4G01ABBFD", 0x35,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 8Gb 3.3V */
+ SPINAND_INFO("MT29F8G01ADAFD", 0x46,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 8Gb 1.8V */
+ SPINAND_INFO("MT29F8G01ADBFD", 0x47,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
};
static int micron_spinand_detect(struct spinand_device *spinand)
@@ -126,8 +234,22 @@ static int micron_spinand_detect(struct spinand_device *spinand)
return 1;
}
+static int micron_spinand_init(struct spinand_device *spinand)
+{
+ /*
+ * The M70A device series enables the Continuous Read feature at power-up,
+ * which is not supported. Disable this bit to avoid any possible
+ * failure.
+ */
+ if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
+ return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
+
+ return 0;
+}
+
static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
.detect = micron_spinand_detect,
+ .init = micron_spinand_init,
};
const struct spinand_manufacturer micron_spinand_manufacturer = {
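As a worked example of the geometry encoded in the table above (and of why only some of the new parts get a SPINAND_SELECT_TARGET hook), the 4 Gbit MT29F4G01ADAGD entry decodes as follows, assuming the usual NAND_MEMORG argument order (bits_per_cell, pagesize, oobsize, pages_per_eraseblock, eraseblocks_per_lun, planes_per_lun, luns_per_target, ntargets):

/* Sketch only: capacity implied by NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 2) */
static unsigned long long example_mt29f4g01adagd_bytes(void)
{
        unsigned long long bytes = 2048ULL     /* pagesize */
                                   * 64        /* pages per eraseblock */
                                   * 2048      /* eraseblocks per LUN */
                                   * 1         /* LUNs per target */
                                   * 2;        /* targets (dies) */

        /*
         * 512 MiB == 4 Gbit; the second die is reached by micron_select_target()
         * writing MICRON_SELECT_DIE(1) (bit 6) to the 0xD0 die-select register.
         */
        return bytes;
}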
diff --git a/drivers/sysreset/Kconfig b/drivers/sysreset/Kconfig
index 4be7433404..6ebc90e1d3 100644
--- a/drivers/sysreset/Kconfig
+++ b/drivers/sysreset/Kconfig
@@ -57,6 +57,13 @@ config SYSRESET_MICROBLAZE
help
This is soft reset on Microblaze which does jump to 0x0 address.
+config SYSRESET_OCTEON
+ bool "Enable support for Marvell Octeon SoC family"
+ depends on ARCH_OCTEON
+ help
+ This enables the system reset driver support for Marvell Octeon
+ SoCs.
+
config SYSRESET_PSCI
bool "Enable support for PSCI System Reset"
depends on ARM_PSCI_FW
diff --git a/drivers/sysreset/Makefile b/drivers/sysreset/Makefile
index 3ed4bab9e3..df2293b848 100644
--- a/drivers/sysreset/Makefile
+++ b/drivers/sysreset/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SANDBOX) += sysreset_sandbox.o
obj-$(CONFIG_SYSRESET_GPIO) += sysreset_gpio.o
obj-$(CONFIG_SYSRESET_MPC83XX) += sysreset_mpc83xx.o
obj-$(CONFIG_SYSRESET_MICROBLAZE) += sysreset_microblaze.o
+obj-$(CONFIG_SYSRESET_OCTEON) += sysreset_octeon.o
obj-$(CONFIG_SYSRESET_PSCI) += sysreset_psci.o
obj-$(CONFIG_SYSRESET_SOCFPGA) += sysreset_socfpga.o
obj-$(CONFIG_SYSRESET_SOCFPGA_S10) += sysreset_socfpga_s10.o
diff --git a/drivers/sysreset/sysreset_octeon.c b/drivers/sysreset/sysreset_octeon.c
new file mode 100644
index 0000000000..a05dac3226
--- /dev/null
+++ b/drivers/sysreset/sysreset_octeon.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Stefan Roese <sr@denx.de>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <sysreset.h>
+#include <asm/io.h>
+
+#define RST_SOFT_RST 0x0080
+
+struct octeon_sysreset_data {
+ void __iomem *base;
+};
+
+static int octeon_sysreset_request(struct udevice *dev, enum sysreset_t type)
+{
+ struct octeon_sysreset_data *data = dev_get_priv(dev);
+
+ writeq(1, data->base + RST_SOFT_RST);
+
+ return -EINPROGRESS;
+}
+
+static int octeon_sysreset_probe(struct udevice *dev)
+{
+ struct octeon_sysreset_data *data = dev_get_priv(dev);
+
+ data->base = dev_remap_addr(dev);
+
+ return 0;
+}
+
+static struct sysreset_ops octeon_sysreset = {
+ .request = octeon_sysreset_request,
+};
+
+static const struct udevice_id octeon_sysreset_ids[] = {
+ { .compatible = "mrvl,cn7xxx-rst" },
+ { }
+};
+
+U_BOOT_DRIVER(sysreset_octeon) = {
+ .id = UCLASS_SYSRESET,
+ .name = "octeon_sysreset",
+ .priv_auto_alloc_size = sizeof(struct octeon_sysreset_data),
+ .ops = &octeon_sysreset,
+ .probe = octeon_sysreset_probe,
+ .of_match = octeon_sysreset_ids,
+};
diff --git a/include/asm-generic/global_data.h b/include/asm-generic/global_data.h
index 8c78792cc9..d4a4e2215d 100644
--- a/include/asm-generic/global_data.h
+++ b/include/asm-generic/global_data.h
@@ -167,5 +167,6 @@ typedef struct global_data {
#define GD_FLG_LOG_READY 0x08000 /* Log system is ready for use */
#define GD_FLG_WDT_READY 0x10000 /* Watchdog is ready for use */
#define GD_FLG_SKIP_LL_INIT 0x20000 /* Don't perform low-level init */
+#define GD_FLG_SMP_READY 0x40000 /* SMP init is complete */
#endif /* __ASM_GENERIC_GBL_DATA_H */
diff --git a/include/configs/octeon_common.h b/include/configs/octeon_common.h
new file mode 100644
index 0000000000..530f02ad3c
--- /dev/null
+++ b/include/configs/octeon_common.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2020
+ * Marvell <www.marvell.com>
+ */
+
+#ifndef __OCTEON_COMMON_H__
+#define __OCTEON_COMMON_H__
+
+/* No DDR init yet -> run in L2 cache with limited resources */
+#define CONFIG_SYS_MALLOC_LEN (256 << 10)
+#define CONFIG_SYS_SDRAM_BASE 0xffffffff80000000
+#define CONFIG_SYS_MONITOR_BASE CONFIG_SYS_TEXT_BASE
+
+#define CONFIG_SYS_LOAD_ADDR (CONFIG_SYS_SDRAM_BASE + (1 << 20))
+
+#define CONFIG_SYS_INIT_SP_OFFSET 0x180000
+
+#endif /* __OCTEON_COMMON_H__ */
diff --git a/include/configs/octeon_ebb7304.h b/include/configs/octeon_ebb7304.h
new file mode 100644
index 0000000000..04fe4dfe22
--- /dev/null
+++ b/include/configs/octeon_ebb7304.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019-2020
+ * Marvell <www.marvell.com>
+ */
+
+#ifndef __CONFIG_H__
+#define __CONFIG_H__
+
+#include "octeon_common.h"
+
+/*
+ * CFI flash
+ */
+#define CONFIG_SYS_MAX_FLASH_BANKS 1
+#define CONFIG_SYS_MAX_FLASH_SECT 256
+#define CONFIG_SYS_FLASH_CFI_WIDTH FLASH_CFI_8BIT
+#define CONFIG_SYS_FLASH_EMPTY_INFO /* flinfo indicates empty blocks */
+
+#endif /* __CONFIG_H__ */
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 83eafb184e..88bacde91e 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -246,6 +246,7 @@ struct spinand_ecc_info {
};
#define SPINAND_HAS_QE_BIT BIT(0)
+#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
/**
* struct spinand_info - Structure used to describe SPI NAND chips
diff --git a/scripts/config_whitelist.txt b/scripts/config_whitelist.txt
index 1602b05f07..2ec7642583 100644
--- a/scripts/config_whitelist.txt
+++ b/scripts/config_whitelist.txt
@@ -228,7 +228,6 @@ CONFIG_CPLD_BR_PRELIM
CONFIG_CPLD_OR_PRELIM
CONFIG_CPM2
CONFIG_CPU_ARMV8
-CONFIG_CPU_CAVIUM_OCTEON
CONFIG_CPU_FREQ_HZ
CONFIG_CPU_HAS_LLSC
CONFIG_CPU_HAS_PREFETCH