author    Paolo Bonzini <pbonzini@redhat.com>  2014-05-06 14:24:32 +0400
committer Paolo Bonzini <pbonzini@redhat.com>  2014-07-11 11:14:02 +0400
commit    285ca9e948fa047e51fe47082528034de5369e8d (patch)
tree      e2aa61f852b3f2896b2ca6eb5c23275387ec9dae
parent    41061cdb98a0bec464278b4db8e894a3121671f5 (diff)
download  linux-285ca9e948fa047e51fe47082528034de5369e8d.tar.xz
KVM: emulate: speed up do_insn_fetch
Hoist the common case up from do_insn_fetch_byte to do_insn_fetch, and prime the fetch_cache in x86_decode_insn. This helps the compiler and the branch predictor a bit, but above all it lays the groundwork for further changes in the next few patches.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  arch/x86/kvm/emulate.c  67
1 file changed, 36 insertions(+), 31 deletions(-)
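The heart of the patch is a fast-path/slow-path split: the old per-byte helper re-checked the fetch_cache on every call, and the patch hoists that check into the caller's copy loop so that the common case is a straight-line load. A minimal sketch of the pattern, with illustrative names rather than the kernel's (refill() stands in for the real guest-memory fetch):

	#include <stdint.h>
	typedef uint8_t u8;
	#define unlikely(x) __builtin_expect(!!(x), 0)

	struct cache { u8 data[15]; unsigned pos, end; };

	/* Stub refill; the real code fetches from guest memory. */
	static int refill(struct cache *c)
	{
		return c->end < 15 ? (c->data[c->end++] = 0x90, 0) : -1;
	}

	/* Before: every byte pays for the refill check inside a helper. */
	static int fetch_byte(struct cache *c, u8 *dest)
	{
		if (c->pos == c->end) {			/* rare */
			int rc = refill(c);
			if (rc)
				return rc;
		}
		*dest = c->data[c->pos++];		/* common */
		return 0;
	}

	/* After: the caller tests the rare case inline and falls through
	 * to a plain copy, which compilers and branch predictors like. */
	static int fetch(struct cache *c, u8 *dest, unsigned size)
	{
		u8 *src = &c->data[c->pos];

		while (size--) {
			if (unlikely(c->pos == c->end)) {
				int rc = refill(c);
				if (rc)
					return rc;
			}
			*dest++ = *src++;
			c->pos++;
		}
		return 0;
	}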
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 390400a54a9c..ea188a338afb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -705,51 +705,51 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
}
/*
- * Fetch the next byte of the instruction being emulated which is pointed to
- * by ctxt->_eip, then increment ctxt->_eip.
- *
- * Also prefetch the remaining bytes of the instruction without crossing page
+ * Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
*/
-static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
+static int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt)
{
struct fetch_cache *fc = &ctxt->fetch;
int rc;
int size, cur_size;
-
- if (ctxt->_eip == fc->end) {
- unsigned long linear;
- struct segmented_address addr = { .seg = VCPU_SREG_CS,
- .ea = ctxt->_eip };
- cur_size = fc->end - fc->start;
- size = min(15UL - cur_size,
- PAGE_SIZE - offset_in_page(ctxt->_eip));
- rc = __linearize(ctxt, addr, size, false, true, &linear);
- if (unlikely(rc != X86EMUL_CONTINUE))
- return rc;
- rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
- size, &ctxt->exception);
- if (unlikely(rc != X86EMUL_CONTINUE))
- return rc;
- fc->end += size;
- }
- *dest = fc->data[ctxt->_eip - fc->start];
- ctxt->_eip++;
+ unsigned long linear;
+
+ struct segmented_address addr = { .seg = VCPU_SREG_CS,
+ .ea = fc->end };
+ cur_size = fc->end - fc->start;
+ size = min(15UL - cur_size,
+ PAGE_SIZE - offset_in_page(fc->end));
+ if (unlikely(size == 0))
+ return X86EMUL_UNHANDLEABLE;
+ rc = __linearize(ctxt, addr, size, false, true, &linear);
+ if (unlikely(rc != X86EMUL_CONTINUE))
+ return rc;
+ rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
+ size, &ctxt->exception);
+ if (unlikely(rc != X86EMUL_CONTINUE))
+ return rc;
+ fc->end += size;
return X86EMUL_CONTINUE;
}
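The min() above enforces both fetch limits at once: an x86 instruction is at most 15 bytes long, and a single ctxt->ops->fetch must not cross a page boundary. A worked example with hypothetical values:

	/* Say fc->end sits 6 bytes before the end of a 4 KiB page and
	 * 4 bytes are already cached (cur_size == 4):
	 *
	 *   15UL - cur_size                 = 11  (room left in the cache)
	 *   PAGE_SIZE - offset_in_page(end) =  6  (room left in the page)
	 *   size = min(11, 6)               =  6  -> stop at the page end
	 *
	 * When cur_size reaches 15, size computes to 0 and the function
	 * returns X86EMUL_UNHANDLEABLE: the 15-byte limit that the old
	 * code checked in do_insn_fetch is now caught here. */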
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
- void *dest, unsigned size)
+ void *__dest, unsigned size)
{
int rc;
+ struct fetch_cache *fc = &ctxt->fetch;
+ u8 *dest = __dest;
+ u8 *src = &fc->data[ctxt->_eip - fc->start];
- /* x86 instructions are limited to 15 bytes. */
- if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
- return X86EMUL_UNHANDLEABLE;
while (size--) {
- rc = do_insn_fetch_byte(ctxt, dest++);
- if (rc != X86EMUL_CONTINUE)
- return rc;
+ if (unlikely(ctxt->_eip == fc->end)) {
+ rc = do_insn_fetch_bytes(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ }
+ *dest++ = *src++;
+ ctxt->_eip++;
}
return X86EMUL_CONTINUE;
}
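One subtlety worth flagging in the new loop: src points into fc->data and is not recomputed after a refill. That is safe in this patch, because do_insn_fetch_bytes only appends to the fixed fetch_cache buffer. A reading of the invariant (an editor's note, not part of the patch):

	/* Across a refill, fc->data and fc->start are unchanged and only
	 * fc->end grows, so
	 *	src == &fc->data[ctxt->_eip - fc->start]
	 * keeps holding: the newly fetched bytes simply appear ahead of
	 * src in the same buffer. */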
@@ -4227,6 +4227,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
+ else {
+ rc = do_insn_fetch_bytes(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ }
switch (mode) {
case X86EMUL_MODE_REAL:
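With the cache primed at decode time, the first insn fetch in the decoder normally finds its byte already cached and never leaves the fast path. A sketch of the resulting flow (decode details elided; rc handling as in the hunk above):

	/* x86_decode_insn entry:
	 *   insn_len > 0  -> memcpy() the caller-supplied bytes into
	 *                    ctxt->fetch.data;
	 *   insn_len == 0 -> do_insn_fetch_bytes() pulls the first chunk
	 *                    from guest memory up front.
	 * Either way, the decode loop's first do_insn_fetch() starts with
	 * ctxt->_eip != fc->end and takes only the copy path. */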