Diffstat (limited to 'include/asm-x86_64')
 include/asm-x86_64/cache.h      |  4 ++--
 include/asm-x86_64/cpufeature.h |  1 +
 include/asm-x86_64/i387.h       | 20 +++++++++++++++++++-
 include/asm-x86_64/mmzone.h     |  3 ++-
 include/asm-x86_64/percpu.h     |  2 +-
 include/asm-x86_64/unistd.h     |  6 +++++-
 6 files changed, 30 insertions(+), 6 deletions(-)
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index c8043a16152e..f8dff1c67538 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -20,8 +20,8 @@
	__attribute__((__section__(".data.page_aligned")))
#endif

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
#endif

+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
#endif
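The move takes the #define out of the CONFIG_X86_VSMP conditional, so __read_mostly is now defined for every x86-64 configuration rather than only vSMP builds. The annotation merely steers a variable into the .data.read_mostly section at link time; a minimal usage sketch (the variable name is invented for illustration):

/* Read on hot paths, written once at boot: grouping such variables in
 * .data.read_mostly keeps them off cachelines that also hold write-hot
 * data, avoiding false sharing on SMP. */
static int feature_enabled __read_mostly = 1;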
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 76bb6193ae91..662964b74e34 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -64,6 +64,7 @@
#define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */
#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
+#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
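The new flag lands in word 3 of the x86_capability bitmap (the Linux-defined word), bit 7, following this header's (word*32 + bit) convention. A hedged sketch of how such a flag is set and tested with the era's real helpers, set_bit() and boot_cpu_has(); the vendor/family condition here is an assumption for illustration, not quoted from the setup code:

/* At CPU init: mark parts whose FXSAVE omits FIP/FDP/FOP when no
 * exception is pending (assumed AMD check, for illustration only). */
if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 6)
	set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

/* Later, e.g. on the context-switch path: */
if (boot_cpu_has(X86_FEATURE_FXSAVE_LEAK))
	; /* apply the fildl workaround shown in i387.h below */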
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index 876eb9a2fe78..cba8a3b0cded 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -72,6 +72,23 @@ extern int set_fpregs(struct task_struct *tsk,
#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))

+#define X87_FSW_ES (1 << 7)	/* Exception Summary */
+
+/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception is
+   pending. Clear the x87 state here by setting it to fixed values.
+   The kernel data segment can sometimes be 0 and sometimes the new
+   user value. Both should be OK.
+   Use the PDA as a safe address because it should already be in L1. */
+static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+{
+	if (unlikely(fx->swd & X87_FSW_ES))
+		asm volatile("fnclex");
+	alternative_input(ASM_NOP8 ASM_NOP2,
+		"    emms\n"		/* clear stack tags */
+		"    fildl %%gs:0",	/* load to clear state */
+		X86_FEATURE_FXSAVE_LEAK);
+}
+
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
int err;
@@ -119,6 +136,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
#endif
if (unlikely(err))
__clear_user(fx, sizeof(struct i387_fxsave_struct));
+	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

@@ -149,7 +167,7 @@ static inline void __fxsave_clear(struct task_struct *tsk)
"i" (offsetof(__typeof__(*tsk),
thread.i387.fxsave)));
#endif
-	__asm__ __volatile__("fnclex");
+	clear_fpu_state(&tsk->thread.i387.fxsave);
}

static inline void kernel_fpu_begin(void)
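On the affected AMD parts, FXSAVE records FIP/FDP/FOP only while an x87 exception is pending, so stale values from one task can surface in another task's saved FPU image: a cross-process information leak. clear_fpu_state() covers both cases: fnclex when the exception summary bit is set, otherwise the patched-in emms + fildl pair, which empties the x87 tag word and executes a dummy load (from the PDA via %gs:0, an address that is always mapped and likely already in L1) so the saved pointers get overwritten. The stale state is observable from user space; a hedged stand-alone probe (FXSAVE image layout per the architecture manuals; this is illustrative userspace code, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* First 32 bytes of the 512-byte FXSAVE image in 64-bit mode. */
struct fxsave_area {
	uint16_t cwd, swd, ftw, fop;
	uint64_t fip;		/* last x87 instruction pointer */
	uint64_t fdp;		/* last x87 data pointer */
	uint32_t mxcsr, mxcsr_mask;
	uint8_t  regs[480];	/* ST/MM and XMM register image */
} __attribute__((aligned(16)));

int main(void)
{
	static struct fxsave_area fx;
	double d = 1.0;

	/* Leave x87 history behind, then snapshot it. */
	asm volatile("fldl %0; fstp %%st(0)" : : "m" (d));
	asm volatile("fxsave %0" : "=m" (fx));
	printf("FIP = %#llx\n", (unsigned long long)fx.fip);
	return 0;
}

On a leaking CPU, the FIP printed here could equally appear in a snapshot taken by a different process that never executed the fldl, which is exactly what the fildl-based scrubbing above prevents.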
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 6b18cd8f293d..6944e7122df5 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -12,7 +12,8 @@
#include <asm/smp.h>

-#define NODEMAPSIZE 0xfff
+/* Should really switch to dynamic allocation at some point */
+#define NODEMAPSIZE 0x4fff

/* Simple perfect hash to map physical addresses to node numbers */
struct memnode {
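NODEMAPSIZE bounds the map array inside struct memnode, so the bump from 0xfff to 0x4fff entries lets the hash cover roughly five times as much physical address space at a given shift, at the cost of about 16 KB more static data (hence the note about dynamic allocation). The lookup the map backs is one shift plus one byte load; a minimal sketch of the scheme, assuming struct memnode carries the shift and the u8 map[] this constant sizes (the header's real accessor is phys_to_nid(); the name below is illustrative):

/* Perfect hash: with an N-entry map and shift s, any physical address
 * below N << s resolves to its NUMA node in O(1). */
static inline int phys_to_nid_sketch(unsigned long paddr)
{
	return memnode.map[paddr >> memnode.shift];
}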
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 4405b4adeaba..7f33aaf9f7b1 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,7 +26,7 @@
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for_each_cpu(__i) \
+ for_each_possible_cpu(__i) \
memcpy((pcpudst)+__per_cpu_offset(__i), \
(src), (size)); \
} while (0)
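The rename is part of the tree-wide for_each_cpu() -> for_each_possible_cpu() cleanup: the iterator walks cpu_possible_map, and percpu_modcopy must seed the per-CPU area of every CPU that could ever come online, not just those online at module load. A small consumer-side sketch using the same era's per-CPU API (the counter is invented for illustration):

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, pkt_count);	/* one slot per CPU */

static unsigned long total_pkt_count(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Offline-but-possible CPUs keep their per-CPU data, so walk
	 * the possible set to avoid losing their contributions. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(pkt_count, cpu);
	return sum;
}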
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index d86494e23b63..feb77cb8c044 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -613,8 +613,12 @@ __SYSCALL(__NR_get_robust_list, sys_get_robust_list)
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 276
__SYSCALL(__NR_tee, sys_tee)
+#define __NR_sync_file_range 277
+__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
+#define __NR_vmsplice 278
+__SYSCALL(__NR_vmsplice, sys_vmsplice)

-#define __NR_syscall_max __NR_tee
+#define __NR_syscall_max __NR_vmsplice

#ifndef __NO_STUBS
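With the table extended and __NR_syscall_max moved up, the new calls are reachable from user space even before libc grows wrappers. A hedged example invoking sync_file_range by the number defined above (flag value 2 is SYNC_FILE_RANGE_WRITE, which starts writeback without waiting; error handling trimmed for brevity):

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fd = open("scratch.tmp", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	write(fd, "hello", 5);
	/* 277 == __NR_sync_file_range on x86-64; kick off writeback of
	 * the first five bytes asynchronously (SYNC_FILE_RANGE_WRITE). */
	syscall(277, fd, (off_t)0, (off_t)5, 2);
	close(fd);
	return 0;
}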