author    David S. Miller <davem@davemloft.net>  2013-10-02 01:06:14 +0400
committer David S. Miller <davem@davemloft.net>  2013-10-02 01:06:14 +0400
commit    4fbef95af4e62d4aada6c1728e04d3b1c828abe0 (patch)
tree      19cb25e39583119c98dee7114aada6a3b57d18a9 /arch
parent    5229432f15e6f1b1e34e519e51d07917dee8790e (diff)
parent    c31eeaced22ce8bd61268a3c595d542bb38c0a4f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/usb/qmi_wwan.c
	drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
	include/net/netfilter/nf_conntrack_synproxy.h
	include/net/secure_seq.h

The conflicts are of two varieties:

1) Conflicts with Joe Perches's 'extern' removal from header file
   function declarations. Usually it's an argument signature change or
   a function being added/removed. The resolutions are trivial.

2) Some overlapping changes in qmi_wwan.c and be.h, one commit adds a
   new value, another changes an existing value. That sort of thing.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/arc/include/asm/spinlock.h | 9
-rw-r--r--  arch/arc/include/asm/uaccess.h | 4
-rw-r--r--  arch/arc/kernel/time.c | 7
-rw-r--r--  arch/arc/kernel/unaligned.c | 6
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/crypto/aes-armv4.S | 6
-rw-r--r--  arch/arm/include/asm/uaccess.h | 7
-rw-r--r--  arch/arm/kernel/entry-common.S | 4
-rw-r--r--  arch/arm/kernel/entry-header.S | 8
-rw-r--r--  arch/arm/kvm/reset.c | 6
-rw-r--r--  arch/arm/mach-imx/clk-imx27.c | 2
-rw-r--r--  arch/arm/mach-imx/clk-imx51-imx53.c | 2
-rw-r--r--  arch/arm/mach-imx/mach-imx6q.c | 9
-rw-r--r--  arch/arm/mach-shmobile/clock-r8a73a4.c | 2
-rw-r--r--  arch/arm/mach-shmobile/clock-sh73a0.c | 2
-rw-r--r--  arch/avr32/include/asm/Kbuild | 16
-rw-r--r--  arch/avr32/include/asm/cputime.h | 6
-rw-r--r--  arch/avr32/include/asm/delay.h | 1
-rw-r--r--  arch/avr32/include/asm/device.h | 7
-rw-r--r--  arch/avr32/include/asm/div64.h | 6
-rw-r--r--  arch/avr32/include/asm/emergency-restart.h | 6
-rw-r--r--  arch/avr32/include/asm/futex.h | 6
-rw-r--r--  arch/avr32/include/asm/irq_regs.h | 1
-rw-r--r--  arch/avr32/include/asm/local.h | 6
-rw-r--r--  arch/avr32/include/asm/local64.h | 1
-rw-r--r--  arch/avr32/include/asm/percpu.h | 6
-rw-r--r--  arch/avr32/include/asm/scatterlist.h | 6
-rw-r--r--  arch/avr32/include/asm/sections.h | 6
-rw-r--r--  arch/avr32/include/asm/topology.h | 6
-rw-r--r--  arch/avr32/include/asm/xor.h | 6
-rw-r--r--  arch/avr32/kernel/process.c | 2
-rw-r--r--  arch/avr32/kernel/time.c | 9
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 2
-rw-r--r--  arch/mips/mm/dma-default.c | 12
-rw-r--r--  arch/openrisc/include/asm/prom.h | 44
-rw-r--r--  arch/parisc/mm/fault.c | 5
-rw-r--r--  arch/powerpc/boot/Makefile | 4
-rw-r--r--  arch/powerpc/boot/epapr-wrapper.c | 9
-rw-r--r--  arch/powerpc/boot/epapr.c | 4
-rw-r--r--  arch/powerpc/boot/of.c | 16
-rwxr-xr-x  arch/powerpc/boot/wrapper | 9
-rw-r--r--  arch/powerpc/include/asm/irq.h | 4
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r--  arch/powerpc/kernel/irq.c | 100
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 25
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 10
-rw-r--r--  arch/powerpc/kernel/process.c | 3
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 21
-rw-r--r--  arch/powerpc/lib/sstep.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 26
-rw-r--r--  arch/s390/Kconfig | 2
-rw-r--r--  arch/s390/include/asm/mutex.h | 2
-rw-r--r--  arch/s390/include/asm/processor.h | 2
-rw-r--r--  arch/s390/include/asm/spinlock.h | 5
-rw-r--r--  arch/score/Kconfig | 4
-rw-r--r--  arch/score/Makefile | 4
-rw-r--r--  arch/score/include/asm/checksum.h | 93
-rw-r--r--  arch/score/include/asm/io.h | 1
-rw-r--r--  arch/score/include/asm/pgalloc.h | 2
-rw-r--r--  arch/score/kernel/entry.S | 4
-rw-r--r--  arch/score/kernel/process.c | 4
-rw-r--r--  arch/sparc/kernel/ds.c | 2
-rw-r--r--  arch/x86/include/asm/xen/page.h | 31
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 10
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 1
-rw-r--r--  arch/x86/kernel/reboot.c | 18
-rw-r--r--  arch/x86/kvm/vmx.c | 4
-rw-r--r--  arch/x86/platform/efi/efi.c | 11
-rw-r--r--  arch/x86/xen/p2m.c | 10
-rw-r--r--  arch/x86/xen/spinlock.c | 26
74 files changed, 387 insertions(+), 343 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 1feb169274fe..af2cc6eabcc7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
config HAVE_ARCH_JUMP_LABEL
bool
-config HAVE_ARCH_MUTEX_CPU_RELAX
- bool
-
config HAVE_RCU_TABLE_FREE
bool
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
smp_mb();
}
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
* Because it essentially checks if buffer end is within limit and @len is
* non-ngeative, which implies that buffer start will be within limit too.
*
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
* compile time constant, causing first sub-expression to be compile time
* subsumed.
*
@@ -53,7 +53,7 @@
*
*/
#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
- (((addr)+(sz)) <= get_fs()))
+ ((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
likely(__user_ok((addr), (sz))))
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 0e51e69cf30d..3fde7de3ea67 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
{
struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
- clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
-
- clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
clk->cpumask = cpumask_of(cpu);
-
- clockevents_register_device(clk);
+ clockevents_config_and_register(clk, arc_get_core_freq(),
+ 0, ARC_TIMER_MAX);
/*
* setup the per-cpu timer IRQ handler - for all cpus
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 28d170060747..7ff5b5c183bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
+
+ /* handle zero-overhead-loop */
+ if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+ regs->ret = regs->lp_start;
+ regs->lp_count--;
+ }
}
return 0;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3f7714d8d2d2..1ad6fb6c094d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2217,8 +2217,7 @@ config NEON
config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"
- default n
- depends on NEON
+ depends on NEON && AEABI
help
Say Y to include support for NEON in kernel mode.
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 19d6cd6f29f9..3a14ea8fe97e 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -148,7 +148,7 @@ AES_Te:
@ const AES_KEY *key) {
.align 5
ENTRY(AES_encrypt)
- sub r3,pc,#8 @ AES_encrypt
+ adr r3,AES_encrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
mov r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
.align 5
ENTRY(private_AES_set_encrypt_key)
_armv4_AES_set_encrypt_key:
- sub r3,pc,#8 @ AES_set_encrypt_key
+ adr r3,_armv4_AES_set_encrypt_key
teq r0,#0
moveq r0,#-1
beq .Labrt
@@ -843,7 +843,7 @@ AES_Td:
@ const AES_KEY *key) {
.align 5
ENTRY(AES_decrypt)
- sub r3,pc,#8 @ AES_decrypt
+ adr r3,AES_decrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
mov r11,r2
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f66..72abdc541f38 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,6 +19,13 @@
#include <asm/unified.h>
#include <asm/compiler.h>
+#if __LINUX_ARM_ARCH__ < 6
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+#endif
+
#define VERIFY_READ 0
#define VERIFY_WRITE 1
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 74ad15d1a065..bc6bd9683ba4 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
- cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
-2: mov why, #0 @ no longer a real syscall
+ mov why, #0 @ no longer a real syscall
b sys_ni_syscall @ not private func
#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index de23a9beed13..39f89fbd5111 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -329,10 +329,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
- bl user_exit
+ bl context_tracking_user_exit
ldmia sp!, {r0-r3, ip, lr}
.else
- bl user_exit
+ bl context_tracking_user_exit
.endif
#endif
.endm
@@ -341,10 +341,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
- bl user_enter
+ bl context_tracking_user_enter
ldmia sp!, {r0-r3, ip, lr}
.else
- bl user_enter
+ bl context_tracking_user_enter
.endif
#endif
.endm
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 71e08baee209..c02ba4af599f 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vtimer_irq = {
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
- struct kvm_regs *cpu_reset;
+ struct kvm_regs *reset_regs;
const struct kvm_irq_level *cpu_vtimer_irq;
switch (vcpu->arch.target) {
case KVM_ARM_TARGET_CORTEX_A15:
if (vcpu->vcpu_id > a15_max_cpu_idx)
return -EINVAL;
- cpu_reset = &a15_regs_reset;
+ reset_regs = &a15_regs_reset;
vcpu->arch.midr = read_cpuid_id();
cpu_vtimer_irq = &a15_vtimer_irq;
break;
@@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
/* Reset core registers */
- memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
+ memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
/* Reset CP15 registers */
kvm_reset_coprocs(vcpu);
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index c3cfa4116dc0..c6b40f386786 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -285,7 +285,7 @@ int __init mx27_clocks_init(unsigned long fref)
clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
clk_register_clkdev(clk[rtc_ipg_gate], NULL, "imx21-rtc");
clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
- clk_register_clkdev(clk[cpu_div], NULL, "cpufreq-cpu0.0");
+ clk_register_clkdev(clk[cpu_div], NULL, "cpu0");
clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index d9094b9a5185..7c0dc4540aa4 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -328,7 +328,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
- clk_register_clkdev(clk[cpu_podf], NULL, "cpufreq-cpu0.0");
+ clk_register_clkdev(clk[cpu_podf], NULL, "cpu0");
clk_register_clkdev(clk[iim_gate], "iim", NULL);
clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 85a1b51346c8..90372a21087f 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -233,10 +233,15 @@ put_node:
of_node_put(np);
}
-static void __init imx6q_opp_init(struct device *cpu_dev)
+static void __init imx6q_opp_init(void)
{
struct device_node *np;
+ struct device *cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_warn("failed to get cpu0 device\n");
+ return;
+ }
np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_warn("failed to find cpu0 node\n");
@@ -268,7 +273,7 @@ static void __init imx6q_init_late(void)
imx6q_cpuidle_init();
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
- imx6q_opp_init(&imx6q_cpufreq_pdev.dev);
+ imx6q_opp_init();
platform_device_register(&imx6q_cpufreq_pdev);
}
}
diff --git a/arch/arm/mach-shmobile/clock-r8a73a4.c b/arch/arm/mach-shmobile/clock-r8a73a4.c
index 8ea5ef6c79cc..5bd2e851e3c7 100644
--- a/arch/arm/mach-shmobile/clock-r8a73a4.c
+++ b/arch/arm/mach-shmobile/clock-r8a73a4.c
@@ -555,7 +555,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("pll2h", &pll2h_clk),
/* CPU clock */
- CLKDEV_DEV_ID("cpufreq-cpu0", &z_clk),
+ CLKDEV_DEV_ID("cpu0", &z_clk),
/* DIV6 */
CLKDEV_CON_ID("zb", &div6_clks[DIV6_ZB]),
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 1942eaef5181..c92c023f0d27 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -616,7 +616,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */
/* DIV4 clocks */
- CLKDEV_DEV_ID("cpufreq-cpu0", &div4_clks[DIV4_Z]),
+ CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]),
/* DIV6 clocks */
CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index d22af851f3f6..fd7980743890 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -1,5 +1,19 @@
generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
generic-y += exec.h
-generic-y += trace_clock.h
+generic-y += futex.h
+generic-y += irq_regs.h
generic-y += param.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += percpu.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += xor.h
diff --git a/arch/avr32/include/asm/cputime.h b/arch/avr32/include/asm/cputime.h
deleted file mode 100644
index e87e0f81cbeb..000000000000
--- a/arch/avr32/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_CPUTIME_H
-#define __ASM_AVR32_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __ASM_AVR32_CPUTIME_H */
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
deleted file mode 100644
index 9670e127b7b2..000000000000
--- a/arch/avr32/include/asm/delay.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/delay.h>
diff --git a/arch/avr32/include/asm/device.h b/arch/avr32/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/avr32/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/avr32/include/asm/div64.h b/arch/avr32/include/asm/div64.h
deleted file mode 100644
index d7ddd4fdeca6..000000000000
--- a/arch/avr32/include/asm/div64.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_DIV64_H
-#define __ASM_AVR32_DIV64_H
-
-#include <asm-generic/div64.h>
-
-#endif /* __ASM_AVR32_DIV64_H */
diff --git a/arch/avr32/include/asm/emergency-restart.h b/arch/avr32/include/asm/emergency-restart.h
deleted file mode 100644
index 3e7e014776ba..000000000000
--- a/arch/avr32/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_EMERGENCY_RESTART_H
-#define __ASM_AVR32_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* __ASM_AVR32_EMERGENCY_RESTART_H */
diff --git a/arch/avr32/include/asm/futex.h b/arch/avr32/include/asm/futex.h
deleted file mode 100644
index 10419f14a68a..000000000000
--- a/arch/avr32/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_FUTEX_H
-#define __ASM_AVR32_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif /* __ASM_AVR32_FUTEX_H */
diff --git a/arch/avr32/include/asm/irq_regs.h b/arch/avr32/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/avr32/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/avr32/include/asm/local.h b/arch/avr32/include/asm/local.h
deleted file mode 100644
index 1c1619694da3..000000000000
--- a/arch/avr32/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_LOCAL_H
-#define __ASM_AVR32_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __ASM_AVR32_LOCAL_H */
diff --git a/arch/avr32/include/asm/local64.h b/arch/avr32/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/avr32/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/avr32/include/asm/percpu.h b/arch/avr32/include/asm/percpu.h
deleted file mode 100644
index 69227b4cd0d4..000000000000
--- a/arch/avr32/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_PERCPU_H
-#define __ASM_AVR32_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ASM_AVR32_PERCPU_H */
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
deleted file mode 100644
index a5902d9834e8..000000000000
--- a/arch/avr32/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_SCATTERLIST_H
-#define __ASM_AVR32_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_AVR32_SCATTERLIST_H */
diff --git a/arch/avr32/include/asm/sections.h b/arch/avr32/include/asm/sections.h
deleted file mode 100644
index aa14252e4181..000000000000
--- a/arch/avr32/include/asm/sections.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_SECTIONS_H
-#define __ASM_AVR32_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#endif /* __ASM_AVR32_SECTIONS_H */
diff --git a/arch/avr32/include/asm/topology.h b/arch/avr32/include/asm/topology.h
deleted file mode 100644
index 5b766cbb4806..000000000000
--- a/arch/avr32/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_TOPOLOGY_H
-#define __ASM_AVR32_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* __ASM_AVR32_TOPOLOGY_H */
diff --git a/arch/avr32/include/asm/xor.h b/arch/avr32/include/asm/xor.h
deleted file mode 100644
index 99c87aa0af4f..000000000000
--- a/arch/avr32/include/asm/xor.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_XOR_H
-#define _ASM_XOR_H
-
-#include <asm-generic/xor.h>
-
-#endif
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index c2731003edef..42a53e740a7e 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -289,7 +289,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.cpu_context.r0 = arg;
p->thread.cpu_context.r1 = usp; /* fn */
- p->thread.cpu_context.r2 = syscall_return;
+ p->thread.cpu_context.r2 = (unsigned long)syscall_return;
p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
childregs->sr = MODE_SUPERVISOR;
} else {
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 869a1c6ffeee..12f828ad5058 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_SHUTDOWN:
sysreg_write(COMPARE, 0);
pr_debug("%s: stop\n", evdev->name);
- cpu_idle_poll_ctrl(false);
+ if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
+ evdev->mode == CLOCK_EVT_MODE_RESUME) {
+ /*
+ * Only disable idle poll if we have forced that
+ * in a previous call.
+ */
+ cpu_idle_poll_ctrl(false);
+ }
break;
default:
BUG();
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 51680d15ca8e..d445d060e346 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -187,7 +187,7 @@
/*
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
* has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
*/
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f25a7e9f8cbc..5f8b95512580 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
{
int i;
- /* Make sure that gcc doesn't leave the empty loop body. */
- for (i = 0; i < nelems; i++, sg++) {
- if (cpu_needs_post_dma_flush(dev))
+ if (cpu_needs_post_dma_flush(dev))
+ for (i = 0; i < nelems; i++, sg++)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
- }
}
static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
{
int i;
- /* Make sure that gcc doesn't leave the empty loop body. */
- for (i = 0; i < nelems; i++, sg++) {
- if (!plat_device_is_coherent(dev))
+ if (!plat_device_is_coherent(dev))
+ for (i = 0; i < nelems; i++, sg++)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
- }
}
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index eb59bfe23e85..93c9980e1b6b 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -14,53 +14,9 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-
-#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
-
#ifndef _ASM_OPENRISC_PROM_H
#define _ASM_OPENRISC_PROM_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/atomic.h>
-#include <linux/of_irq.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/proc_fs.h>
-#include <linux/platform_device.h>
#define HAVE_ARCH_DEVTREE_FIXUPS
-/* Other Prototypes */
-extern int early_uartlite_console(void);
-
-/* Parse the ibm,dma-window property of an OF node into the busno, phys and
- * size parameters.
- */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
- unsigned long *busno, unsigned long *phys, unsigned long *size);
-
-extern void kdump_move_device_tree(void);
-
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev: the device whose interrupt is to be resolved
- * @out_irq: structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index d10d27a720c0..00c0ed333a3d 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -182,6 +182,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
+
+ acc_type = parisc_acctyp(code, regs->iir);
+
if (acc_type & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
retry:
@@ -196,8 +199,6 @@ retry:
good_area:
- acc_type = parisc_acctyp(code,regs->iir);
-
if ((vma->vm_flags & acc_type) != acc_type)
goto bad_area;
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6a15c968d214..15ca2255f438 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
treeboot-walnut.c cuboot-acadia.c \
cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
prpmc2800.c
src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644
index 000000000000..c10191006673
--- /dev/null
+++ b/arch/powerpc/boot/epapr-wrapper.c
@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+ unsigned long r5, unsigned long r6,
+ unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ epapr_platform_init(r3, r4, r5, r6, r7);
+}
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
index 06c1961bd124..02e91aa2194a 100644
--- a/arch/powerpc/boot/epapr.c
+++ b/arch/powerpc/boot/epapr.c
@@ -48,8 +48,8 @@ static void platform_fixups(void)
fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
}
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
{
epapr_magic = r6;
ima_size = r7;
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 61d9899aa0d0..62e2f43ec1df 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -26,6 +26,9 @@
static unsigned long claim_base;
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7);
+
static void *of_try_claim(unsigned long size)
{
unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
}
}
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
{
platform_ops.image_hdr = of_image_hdr;
platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
loader_info.initrd_size = a2;
}
}
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ /* Detect OF vs. ePAPR boot */
+ if (r5)
+ of_platform_init(r3, r4, (void *)r5);
+ else
+ epapr_platform_init(r3, r4, r5, r6, r7);
+}
+
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6761c746048d..cd7af841ba05 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -148,18 +148,18 @@ make_space=y
case "$platform" in
pseries)
- platformo=$object/of.o
+ platformo="$object/of.o $object/epapr.o"
link_address='0x4000000'
;;
maple)
- platformo=$object/of.o
+ platformo="$object/of.o $object/epapr.o"
link_address='0x400000'
;;
pmac|chrp)
- platformo=$object/of.o
+ platformo="$object/of.o $object/epapr.o"
;;
coff)
- platformo="$object/crt0.o $object/of.o"
+ platformo="$object/crt0.o $object/of.o $object/epapr.o"
lds=$object/zImage.coff.lds
link_address='0x500000'
pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
platformo="$object/treeboot-iss4xx.o"
;;
epapr)
+ platformo="$object/epapr.o $object/epapr-wrapper.o"
link_address='0x20000000'
pie=-pie
;;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
extern void irq_ctx_init(void);
extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
- struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
int irq_choose_cpu(const struct cpumask *mask);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e378cccfca55..ce4de5aed7b5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -149,8 +149,6 @@ typedef struct {
struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
- unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
-
#ifdef CONFIG_PPC64
unsigned long ksp_vsid;
#endif
@@ -162,6 +160,7 @@ struct thread_struct {
#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
+ unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
@@ -321,7 +320,6 @@ struct thread_struct {
#else
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .ksp_limit = INIT_SP_LIMIT, \
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.fs = KERNEL_DS, \
.fpr = {{0}}, \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d8958be5f31a..502c7a4e73f7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -80,10 +80,11 @@ int main(void)
DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+ DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+ DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
#endif /* CONFIG_PPC64 */
DEFINE(KSP, offsetof(struct thread_struct, ksp));
- DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..57d286a78f86 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
}
#endif
-static inline void handle_one_irq(unsigned int irq)
-{
- struct thread_info *curtp, *irqtp;
- unsigned long saved_sp_limit;
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- if (!desc)
- return;
-
- /* Switch to the irq stack to handle this */
- curtp = current_thread_info();
- irqtp = hardirq_ctx[smp_processor_id()];
-
- if (curtp == irqtp) {
- /* We're already on the irq stack, just handle it */
- desc->handle_irq(irq, desc);
- return;
- }
-
- saved_sp_limit = current->thread.ksp_limit;
-
- irqtp->task = curtp->task;
- irqtp->flags = 0;
-
- /* Copy the softirq bits in preempt_count so that the
- * softirq checks work in the hardirq context. */
- irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
- (curtp->preempt_count & SOFTIRQ_MASK);
-
- current->thread.ksp_limit = (unsigned long)irqtp +
- _ALIGN_UP(sizeof(struct thread_info), 16);
-
- call_handle_irq(irq, desc, irqtp, desc->handle_irq);
- current->thread.ksp_limit = saved_sp_limit;
- irqtp->task = NULL;
-
- /* Set any flag that may have been set on the
- * alternate stack
- */
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
-}
-
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
#endif
}
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
+ struct irq_desc *desc;
unsigned int irq;
irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
*/
irq = ppc_md.get_irq();
- /* We can hard enable interrupts now */
+ /* We can hard enable interrupts now to allow perf interrupts */
may_hard_irq_enable();
/* And finally process it */
- if (irq != NO_IRQ)
- handle_one_irq(irq);
- else
+ if (unlikely(irq == NO_IRQ))
__get_cpu_var(irq_stat).spurious_irqs++;
+ else {
+ desc = irq_to_desc(irq);
+ if (likely(desc))
+ desc->handle_irq(irq, desc);
+ }
trace_irq_exit(regs);
irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct thread_info *curtp, *irqtp;
+
+ /* Switch to the irq stack to handle this */
+ curtp = current_thread_info();
+ irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+ /* Already there ? */
+ if (unlikely(curtp == irqtp)) {
+ __do_irq(regs);
+ set_irq_regs(old_regs);
+ return;
+ }
+
+ /* Prepare the thread_info in the irq stack */
+ irqtp->task = curtp->task;
+ irqtp->flags = 0;
+
+ /* Copy the preempt_count so that the [soft]irq checks work. */
+ irqtp->preempt_count = curtp->preempt_count;
+
+ /* Switch stack and call */
+ call_do_irq(regs, irqtp);
+
+ /* Restore stack limit */
+ irqtp->task = NULL;
+
+ /* Copy back updates to the thread_info */
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+
set_irq_regs(old_regs);
}
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
tp = softirq_ctx[i];
tp->cpu = i;
- tp->preempt_count = 0;
memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
tp = hardirq_ctx[i];
tp->cpu = i;
- tp->preempt_count = HARDIRQ_OFFSET;
}
}
static inline void do_softirq_onstack(void)
{
struct thread_info *curtp, *irqtp;
- unsigned long saved_sp_limit = current->thread.ksp_limit;
curtp = current_thread_info();
irqtp = softirq_ctx[smp_processor_id()];
irqtp->task = curtp->task;
irqtp->flags = 0;
- current->thread.ksp_limit = (unsigned long)irqtp +
- _ALIGN_UP(sizeof(struct thread_info), 16);
call_do_softirq(irqtp);
- current->thread.ksp_limit = saved_sp_limit;
irqtp->task = NULL;
/* Set any flag that may have been set on the
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 777d999f563b..2b0ad9845363 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -36,26 +36,41 @@
.text
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
_GLOBAL(call_do_softirq)
mflr r0
stw r0,4(r1)
+ lwz r10,THREAD+KSP_LIMIT(r2)
+ addi r11,r3,THREAD_INFO_GAP
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
mr r1,r3
+ stw r10,8(r1)
+ stw r11,THREAD+KSP_LIMIT(r2)
bl __do_softirq
+ lwz r10,8(r1)
lwz r1,0(r1)
lwz r0,4(r1)
+ stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
mflr r0
stw r0,4(r1)
- mtctr r6
- stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
- mr r1,r5
- bctrl
+ lwz r10,THREAD+KSP_LIMIT(r2)
+ addi r11,r3,THREAD_INFO_GAP
+ stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+ mr r1,r4
+ stw r10,8(r1)
+ stw r11,THREAD+KSP_LIMIT(r2)
+ bl __do_irq
+ lwz r10,8(r1)
lwz r1,0(r1)
lwz r0,4(r1)
+ stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 971d7e78aff2..e59caf874d05 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
-_GLOBAL(call_handle_irq)
- ld r8,0(r6)
+_GLOBAL(call_do_irq)
mflr r0
std r0,16(r1)
- mtctr r8
- stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
- mr r1,r5
- bctrl
+ stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+ mr r1,r4
+ bl .__do_irq
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6f428da53e20..96d2fdf3aa9e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
-
+#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
p->thread.ptrace_bps[0] = NULL;
#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 12e656ffe60e..5fe2842e8bab 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
static cell_t __initdata regbuf[1024];
+static bool rtas_has_query_cpu_stopped;
+
/*
* Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
&val, sizeof(val));
+ /* Check if it supports "query-cpu-stopped-state" */
+ if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+ &val, sizeof(val)) != PROM_ERROR)
+ rtas_has_query_cpu_stopped = true;
+
#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
/* PowerVN takeover hack */
prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
= (void *) LOW_ADDR(__secondary_hold_acknowledge);
unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
+ /*
+ * On pseries, if RTAS supports "query-cpu-stopped-state",
+ * we skip this stage, the CPUs will be started by the
+ * kernel using RTAS.
+ */
+ if ((of_platform == PLATFORM_PSERIES ||
+ of_platform == PLATFORM_PSERIES_LPAR) &&
+ rtas_has_query_cpu_stopped) {
+ prom_printf("prom_hold_cpus: skipped\n");
+ return;
+ }
+
prom_debug("prom_hold_cpus: start...\n");
prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* On non-powermacs, put all CPUs in spin-loops.
*
* PowerMacs use a different mechanism to spin CPUs
+ *
+ * (This must be done after instanciating RTAS)
*/
if (of_platform != PLATFORM_POWERMAC &&
of_platform != PLATFORM_OPAL)
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a7ee978fb860..b1faa1593c90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
*/
if ((ra == 1) && !(regs->msr & MSR_PR) \
&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
/*
* Check if we will touch kernel sack overflow
*/
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
err = -EINVAL;
break;
}
-
+#endif /* CONFIG_PPC32 */
/*
* Check if we already set since that means we'll
* lose the previous value.
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1c1771a40250..24f58cb0a543 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
alloc_bootmem_cpumask_var(&of_spin_mask);
- /* Mark threads which are still spinning in hold loops. */
- if (cpu_has_feature(CPU_FTR_SMT)) {
- for_each_present_cpu(i) {
- if (cpu_thread_in_core(i) == 0)
- cpumask_set_cpu(i, of_spin_mask);
- }
- } else {
- cpumask_copy(of_spin_mask, cpu_present_mask);
+ /*
+ * Mark threads which are still spinning in hold loops
+ *
+ * We know prom_init will not have started them if RTAS supports
+ * query-cpu-stopped-state.
+ */
+ if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+ if (cpu_has_feature(CPU_FTR_SMT)) {
+ for_each_present_cpu(i) {
+ if (cpu_thread_in_core(i) == 0)
+ cpumask_set_cpu(i, of_spin_mask);
+ }
+ } else
+ cpumask_copy(of_spin_mask, cpu_present_mask);
+
+ cpumask_clear_cpu(boot_cpuid, of_spin_mask);
}
- cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
smp_ops->give_timebase = rtas_give_timebase;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dcc6ac2d8026..7143793859fa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -93,6 +93,7 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+ select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
- select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 688271f5f2e4..458c1f7fbc18 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,5 +7,3 @@
*/
#include <asm-generic/mutex-dec.h>
-
-#define arch_mutex_cpu_relax() barrier()
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 0eb37505cab1..ca7821f07260 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
barrier();
}
+#define arch_mutex_cpu_relax() barrier()
+
static inline void psw_set_key(unsigned int key)
{
asm volatile("spka 0(%0)" : : "d" (key));
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 701fe8c59e1f..83e5d216105e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.owner_cpu == 0;
+}
+
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
int old;
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index a1be70db75fe..305f7ee1f382 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -2,6 +2,7 @@ menu "Machine selection"
config SCORE
def_bool y
+ select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_ATOMIC64
@@ -110,3 +111,6 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+
+config NO_IOMEM
+ def_bool y
diff --git a/arch/score/Makefile b/arch/score/Makefile
index 974aefe86123..9e3e060290e0 100644
--- a/arch/score/Makefile
+++ b/arch/score/Makefile
@@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
#
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
-KBUILD_AFLAGS_MODULE += -mlong-calls
-KBUILD_CFLAGS_MODULE += -mlong-calls
+KBUILD_AFLAGS_MODULE +=
+KBUILD_CFLAGS_MODULE +=
LDFLAGS += --oformat elf32-littlescore
LDFLAGS_vmlinux += -G0 -static -nostdlib
diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h
index f909ac3144a4..961bd64015a8 100644
--- a/arch/score/include/asm/checksum.h
+++ b/arch/score/include/asm/checksum.h
@@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
__wsum sum)
{
__asm__ __volatile__(
- ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t"
- ".set\tnoat\n\t"
- "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t"
- "sltu\t$1, %0, %5\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %6\t\t\t# csum\n\t"
- "sltu\t$1, %0, %6\n\t"
- "lw\t%1, 0(%2)\t\t\t# four words source address\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 4(%2)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 8(%2)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 12(%2)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 0(%3)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 4(%3)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 8(%3)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "lw\t%1, 12(%3)\n\t"
- "addu\t%0, $1\n\t"
- "addu\t%0, %1\n\t"
- "sltu\t$1, %0, %1\n\t"
- "addu\t%0, $1\t\t\t# Add final carry\n\t"
- ".set\tnoat\n\t"
- ".set\tnoreorder"
+ ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
+ "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
+ "cmp.c\t%5, %0\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %6\t\t\t# csum\n\t"
+ "cmp.c\t%6, %0\n\t"
+ "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "1:lw\t%1, [%2, 4]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%2,8]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%2, 12]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0,%1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%3, 0]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%3, 4]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%3, 8]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "lw\t%1, [%3, 12]\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:add\t%0, %0, %1\n\t"
+ "cmp.c\t%1, %0\n\t"
+ "bleu 1f\n\t"
+ "addi\t%0, 0x1\n\t"
+ "1:\n\t"
+ ".set\toptimize"
: "=r" (sum), "=r" (proto)
: "r" (saddr), "r" (daddr),
"0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h
index fbbfd7132e3b..574c8827abe2 100644
--- a/arch/score/include/asm/io.h
+++ b/arch/score/include/asm/io.h
@@ -5,5 +5,4 @@
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
-
#endif /* _ASM_SCORE_IO_H */
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 059a61b7071b..716b3fd1d863 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -2,7 +2,7 @@
#define _ASM_SCORE_PGALLOC_H
#include <linux/mm.h>
-
+#include <linux/highmem.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 7234ed09b7b7..befb87d30a89 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -264,7 +264,7 @@ resume_kernel:
disable_irq
lw r8, [r28, TI_PRE_COUNT]
cmpz.c r8
- bne r8, restore_all
+ bne restore_all
need_resched:
lw r8, [r28, TI_FLAGS]
andri.c r9, r8, _TIF_NEED_RESCHED
@@ -415,7 +415,7 @@ ENTRY(handle_sys)
sw r9, [r0, PT_EPC]
cmpi.c r27, __NR_syscalls # check syscall number
- bgeu illegal_syscall
+ bcs illegal_syscall
slli r8, r27, 2 # get syscall routine
la r11, sys_call_table
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index f4c6d02421d3..a1519ad3d49d 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.reg0 = (unsigned long) childregs;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
- p->thread->reg12 = usp;
- p->thread->reg13 = arg;
+ p->thread.reg12 = usp;
+ p->thread.reg13 = arg;
p->thread.reg3 = (unsigned long) ret_from_kernel_thread;
} else {
*childregs = *current_pt_regs();
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 62d6b153ffa2..4d9ac8406f32 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -851,7 +851,7 @@ void ldom_reboot(const char *boot_command)
strcpy(full_boot_str, "boot ");
strlcpy(full_boot_str + strlen("boot "), boot_command,
- sizeof(full_boot_str + strlen("boot ")));
+ sizeof(full_boot_str));
len = strlen(full_boot_str);
if (reboot_data_supported) {
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fbc09b7..b913915e8e63 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
unsigned long pfn;
- int ret = 0;
+ int ret;
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
- if (unlikely(mfn >= machine_to_phys_nr)) {
- pfn = ~0;
- goto try_override;
- }
- pfn = 0;
+ if (unlikely(mfn >= machine_to_phys_nr))
+ return ~0;
+
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
- /* ret might be < 0 if there are no entries in the m2p for mfn */
if (ret < 0)
- pfn = ~0;
- else if (get_phys_to_machine(pfn) != mfn)
+ return ~0;
+
+ return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) != mfn) {
/*
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
* m2p_find_override_pfn returns ~0 if it doesn't find anything.
*/
pfn = m2p_find_override_pfn(mfn, ~0);
+ }
/*
* pfn is ~0 if there are no entries in the m2p for mfn or if the
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8355c84b9729..897783b3302a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
err = amd_pmu_init();
break;
default:
- return 0;
+ err = -ENOTSUPP;
}
if (err != 0) {
pr_cont("no PMU driver, software events only.\n");
@@ -1883,9 +1883,9 @@ static struct pmu pmu = {
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
- userpg->cap_usr_time = 0;
- userpg->cap_usr_time_zero = 0;
- userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+ userpg->cap_user_time = 0;
+ userpg->cap_user_time_zero = 0;
+ userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
userpg->pmc_width = x86_pmu.cntval_bits;
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return;
- userpg->cap_usr_time = 1;
+ userpg->cap_user_time = 1;
userpg->time_mult = this_cpu_read(cyc2ns);
userpg->time_shift = CYC2NS_SCALE_FACTOR;
userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
if (sched_clock_stable && !check_tsc_disabled()) {
- userpg->cap_usr_time_zero = 1;
+ userpg->cap_user_time_zero = 1;
userpg->time_zero = this_cpu_read(cyc2ns_offset);
}
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 9db76c31b3c3..f31a1655d1ff 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
break;
case 55: /* Atom 22nm "Silvermont" */
+ case 77: /* Avoton "Silvermont" */
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 8ed44589b0e4..4118f9f68315 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
box->hrtimer.function = uncore_pmu_hrtimer;
}
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
{
struct intel_uncore_box *box;
int i, size;
size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
- box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+ box = kzalloc_node(size, GFP_KERNEL, node);
if (!box)
return NULL;
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
struct intel_uncore_box *fake_box;
int ret = -EINVAL, n;
- fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+ fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
if (!fake_box)
return -ENOMEM;
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
}
type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
- box = uncore_alloc_box(type, 0);
+ box = uncore_alloc_box(type, NUMA_NO_NODE);
if (!box)
return -ENOMEM;
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
if (pmu->func_id < 0)
pmu->func_id = j;
- box = uncore_alloc_box(type, cpu);
+ box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box)
return -ENOMEM;
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7123b5df479d..af99f71aeb7f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
/* need to apply patch? */
if (rev >= mc_amd->hdr.patch_id) {
c->microcode = rev;
+ uci->cpu_sig.rev = rev;
return 0;
}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 563ed91e6faa..e643e744e4d8 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
{ /* Handle problems with rebooting on the Precision M6600. */
.callback = set_pci_reboot,
- .ident = "Dell OptiPlex 990",
+ .ident = "Dell Precision M6600",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
},
},
+ { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+ .callback = set_pci_reboot,
+ .ident = "Dell PowerEdge C6100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
+ { /* Some C6100 machines were shipped with vendor being 'Dell'. */
+ .callback = set_pci_reboot,
+ .ident = "Dell PowerEdge C6100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a1216de9ffda..3b8e7459dd4d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5345,7 +5345,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
* There are errata that may cause this bit to not be set:
* AAK134, BY25.
*/
- if (exit_qualification & INTR_INFO_UNBLOCK_NMI)
+ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ cpu_has_virtual_nmis() &&
+ (exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 90f6ed127096..c7e22ab29a5a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
+ if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+ continue;
+ }
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8b901e8d782d..a61c7d5811be 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
- int ret = 0;
pfn = page_to_pfn(page);
if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
* frontend pages while they are being shared with the backend,
* because mfn_to_pfn (that ends up being called by GUPF) will
* return the backend pfn rather than the frontend pfn. */
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
- if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) == mfn)
set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
- int ret = 0;
pfn = page_to_pfn(page);
mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
* the original pfn causes mfn_to_pfn(mfn) to return the frontend
* pfn again. */
mfn &= ~FOREIGN_FRAME_BIT;
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
- if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
m2p_find_override(mfn) == NULL)
set_phys_to_machine(pfn, mfn);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 253f63fceea1..be6b86078957 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
}
+/*
+ * Our init of PV spinlocks is split in two init functions due to us
+ * using paravirt patching and jump labels patching and having to do
+ * all of this before SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
void __init xen_init_spinlocks(void)
{
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
return;
}
- static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
pv_lock_ops.unlock_kick = xen_unlock_kick;
}
+/*
+ * While the jump_label init code needs to happend _after_ the jump labels are
+ * enabled and before SMP is started. Hence we use pre-SMP initcall level
+ * init. We cannot do it in xen_init_spinlocks as that is done before
+ * jump labels are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+ if (!xen_pvspin)
+ return 0;
+
+ static_key_slow_inc(&paravirt_ticketlocks_enabled);
+ return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
static __init int xen_parse_nopvspin(char *arg)
{
xen_pvspin = false;