author    Ingo Molnar <mingo@kernel.org>    2019-04-25 14:03:31 +0300
committer Ingo Molnar <mingo@kernel.org>    2019-04-29 17:05:49 +0300
commit    1fc654cf6e04b402ba9c4327b2919ea864037e7a (patch)
tree      d012f50e6390e2cbd533035f5b9c18ac9e4a909d /arch/x86/kernel/paravirt_patch.c
parent    fc93dfd9345bb8b29a62b21cb0447dd1a3815f91 (diff)
x86/paravirt: Standardize 'insn_buff' variable names
We currently have 6 (!) separate naming variants to name temporary instruction
buffers that are used for code patching:

 - insnbuf
 - insnbuff
 - insn_buff
 - insn_buffer
 - ibuf
 - ibuffer

These are used as local variables, percpu fields and function parameters.

Standardize all the names to a single variant: 'insn_buff'.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
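For context on what these buffers are for: the PATCH()/PATCH_CASE() macros changed
below ultimately call paravirt_patch_insns(), which copies a native instruction
template into the caller-supplied temporary buffer. The following is a simplified
sketch of that helper (it lives in arch/x86/kernel/paravirt.c and is not part of
this diff; details of the real code may differ):

	/*
	 * Sketch only: copy the native instruction template [start, end)
	 * into the temporary instruction buffer, provided it fits into
	 * the 'len' bytes available at the patch site.
	 */
	unsigned int paravirt_patch_insns(void *insn_buff, unsigned int len,
					  const char *start, const char *end)
	{
		unsigned int insn_len = end - start;

		/* The replacement must fit into the patch site. */
		BUG_ON(insn_len > len || start == NULL);

		memcpy(insn_buff, start, insn_len);

		return insn_len;	/* number of bytes written */
	}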
Diffstat (limited to 'arch/x86/kernel/paravirt_patch.c')
-rw-r--r--  arch/x86/kernel/paravirt_patch.c | 42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c
index 37b1d43d1e17..3eff63c090d2 100644
--- a/arch/x86/kernel/paravirt_patch.c
+++ b/arch/x86/kernel/paravirt_patch.c
@@ -10,12 +10,12 @@
#define PEND(d, m) \
(PSTART(d, m) + sizeof(patch_data_##d.m))
-#define PATCH(d, m, ibuf, len) \
- paravirt_patch_insns(ibuf, len, PSTART(d, m), PEND(d, m))
+#define PATCH(d, m, insn_buff, len) \
+ paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))
-#define PATCH_CASE(ops, m, data, ibuf, len) \
+#define PATCH_CASE(ops, m, data, insn_buff, len) \
case PARAVIRT_PATCH(ops.m): \
- return PATCH(data, ops##_##m, ibuf, len)
+ return PATCH(data, ops##_##m, insn_buff, len)
#ifdef CONFIG_PARAVIRT_XXL
struct patch_xxl {
@@ -57,10 +57,10 @@ static const struct patch_xxl patch_data_xxl = {
# endif
};
-unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
+unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
#ifdef CONFIG_X86_64
- return PATCH(xxl, mov64, insnbuf, len);
+ return PATCH(xxl, mov64, insn_buff, len);
#endif
return 0;
}
@@ -83,44 +83,44 @@ static const struct patch_lock patch_data_lock = {
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-unsigned int native_patch(u8 type, void *ibuf, unsigned long addr,
+unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
unsigned int len)
{
switch (type) {
#ifdef CONFIG_PARAVIRT_XXL
- PATCH_CASE(irq, restore_fl, xxl, ibuf, len);
- PATCH_CASE(irq, save_fl, xxl, ibuf, len);
- PATCH_CASE(irq, irq_enable, xxl, ibuf, len);
- PATCH_CASE(irq, irq_disable, xxl, ibuf, len);
+ PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
+ PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
+ PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
+ PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);
- PATCH_CASE(mmu, read_cr2, xxl, ibuf, len);
- PATCH_CASE(mmu, read_cr3, xxl, ibuf, len);
- PATCH_CASE(mmu, write_cr3, xxl, ibuf, len);
+ PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
+ PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
+ PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
# ifdef CONFIG_X86_64
- PATCH_CASE(cpu, usergs_sysret64, xxl, ibuf, len);
- PATCH_CASE(cpu, swapgs, xxl, ibuf, len);
- PATCH_CASE(cpu, wbinvd, xxl, ibuf, len);
+ PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
+ PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
+ PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
# else
- PATCH_CASE(cpu, iret, xxl, ibuf, len);
+ PATCH_CASE(cpu, iret, xxl, insn_buff, len);
# endif
#endif
#ifdef CONFIG_PARAVIRT_SPINLOCKS
case PARAVIRT_PATCH(lock.queued_spin_unlock):
if (pv_is_native_spin_unlock())
- return PATCH(lock, queued_spin_unlock, ibuf, len);
+ return PATCH(lock, queued_spin_unlock, insn_buff, len);
break;
case PARAVIRT_PATCH(lock.vcpu_is_preempted):
if (pv_is_native_vcpu_is_preempted())
- return PATCH(lock, vcpu_is_preempted, ibuf, len);
+ return PATCH(lock, vcpu_is_preempted, insn_buff, len);
break;
#endif
default:
break;
}
- return paravirt_patch_default(type, ibuf, addr, len);
+ return paravirt_patch_default(type, insn_buff, addr, len);
}
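
For illustration (not part of this diff), a caller such as apply_paravirt() in
arch/x86/kernel/alternative.c uses a local 'insn_buff' in roughly the pattern
sketched below; this is a simplified, hedged sketch and the exact kernel code
differs:

	char insn_buff[MAX_PATCH_LEN];
	unsigned int used;

	/* Start from the original instructions at the patch site... */
	memcpy(insn_buff, p->instr, p->len);

	/* ...let the paravirt backend rewrite them into the buffer... */
	used = pv_ops.init.patch(p->instrtype, insn_buff,
				 (unsigned long)p->instr, p->len);

	/* ...pad the remainder with NOPs and write the result back. */
	add_nops(insn_buff + used, p->len - used);
	text_poke_early(p->instr, insn_buff, p->len);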