Diffstat (limited to 'drivers/lguest/x86')
-rw-r--r--  drivers/lguest/x86/core.c  |  62
1 files changed, 61 insertions, 1 deletions
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index bf7942327bd..a6b717644be 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -290,6 +290,57 @@ static int emulate_insn(struct lg_cpu *cpu)
return 1;
}
+/* Our hypercalls mechanism used to be based on direct software interrupts.
+ * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to
+ * change over to using kvm hypercalls.
+ *
+ * KVM_HYPERCALL is actually a "vmcall" instruction, which generates an invalid
+ * opcode fault (fault 6) on non-VT cpus, so the easiest solution seemed to be
+ * an *emulation approach*: if the fault was really produced by a hypercall
+ * (is_hypercall() does exactly this check), we can just call the corresponding
+ * hypercall host implementation function.
+ *
+ * But these invalid opcode faults are notably slower than software interrupts.
+ * So we implemented the *patching (or rewriting) approach*: every time we hit
+ * the KVM_HYPERCALL opcode in Guest code, we patch it to the old "int 0x1f"
+ * opcode, so next time the Guest calls this hypercall it will use the
+ * faster trap mechanism.
+ *
+ * Matias even benchmarked it to convince you: this shows the average cycle
+ * cost of a hypercall. For each alternative solution mentioned above we've
+ * made 5 runs of the benchmark:
+ *
+ * 1) direct software interrupt: 2915, 2789, 2764, 2721, 2898
+ * 2) emulation technique: 3410, 3681, 3466, 3392, 3780
+ * 3) patching (rewrite) technique: 2977, 2975, 2891, 2637, 2884
+ *
+ * One two-line function is worth a 20% hypercall speed boost!
+ */
+static void rewrite_hypercall(struct lg_cpu *cpu)
+{
+ /* These are the opcodes we use to patch the Guest. The opcode for "int
+ * $0x1f" is "0xcd 0x1f", but the vmcall instruction is 3 bytes long, so we
+ * complete the sequence with a NOP (0x90). */
+ u8 insn[3] = {0xcd, 0x1f, 0x90};
+
+ __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn));
+}
+
+static bool is_hypercall(struct lg_cpu *cpu)
+{
+ u8 insn[3];
+
+ /* This must be the Guest kernel trying to do something.
+ * The bottom two bits of the CS segment register are the privilege
+ * level. */
+ if ((cpu->regs->cs & 3) != GUEST_PL)
+ return false;
+
+ /* Is it a vmcall? */
+ __lgread(cpu, insn, guest_pa(cpu, cpu->regs->eip), sizeof(insn));
+ return insn[0] == 0x0f && insn[1] == 0x01 && insn[2] == 0xc1;
+}
+
/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
void lguest_arch_handle_trap(struct lg_cpu *cpu)
{
@@ -337,7 +388,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
break;
case 32 ... 255:
/* These values mean a real interrupt occurred, in which case
- * the Host handler has already been run. We just do a
+ * the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
* return to run the Guest again */
cond_resched();
@@ -347,6 +398,15 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
* up the pointer now to indicate a hypercall is pending. */
cpu->hcall = (struct hcall_args *)cpu->regs;
return;
+ case 6:
+ /* kvm hypercalls trigger an invalid opcode fault (6).
+ * We need to check if ring == GUEST_PL and
+ * faulting instruction == vmcall. */
+ if (is_hypercall(cpu)) {
+ rewrite_hypercall(cpu);
+ return;
+ }
+ break;
}
/* We didn't handle the trap, so it needs to go to the Guest. */
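The sketch below is not part of the patch: it is a minimal userspace rendering of the same byte-level check and rewrite, using only the opcodes quoted above (vmcall is 0f 01 c1; the replacement is "int $0x1f" plus a NOP, i.e. cd 1f 90). The buffer and helper names here are invented for illustration; the real code also checks the Guest privilege level first and reads and writes Guest memory through __lgread()/__lgwrite() at guest_pa(cpu, cpu->regs->eip).

    /* Standalone illustration of the is_hypercall()/rewrite_hypercall()
     * logic on a plain byte buffer standing in for Guest memory at eip. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* "vmcall", as emitted by KVM_HYPERCALL: 0f 01 c1. */
    static const uint8_t vmcall_insn[3] = { 0x0f, 0x01, 0xc1 };
    /* "int $0x1f" padded with a NOP so it stays 3 bytes long: cd 1f 90. */
    static const uint8_t int1f_insn[3]  = { 0xcd, 0x1f, 0x90 };

    static bool looks_like_hypercall(const uint8_t *insn)
    {
            /* Same opcode test the patch performs in is_hypercall(). */
            return insn[0] == 0x0f && insn[1] == 0x01 && insn[2] == 0xc1;
    }

    static void patch_to_trap(uint8_t *insn)
    {
            /* Same rewrite the patch performs in rewrite_hypercall(). */
            memcpy(insn, int1f_insn, sizeof(int1f_insn));
    }

    int main(void)
    {
            uint8_t guest_text[3];

            memcpy(guest_text, vmcall_insn, sizeof(vmcall_insn));
            if (looks_like_hypercall(guest_text))
                    patch_to_trap(guest_text);

            /* Prints "cd 1f 90": the vmcall has become a software interrupt. */
            printf("%02x %02x %02x\n",
                   guest_text[0], guest_text[1], guest_text[2]);
            return 0;
    }

For reference, averaging the five benchmark runs quoted in the comment gives roughly 2817 cycles for the direct software interrupt, 3546 for emulation and 2873 for patching, so rewriting the instruction recovers nearly all of the roughly 20% overhead that pure emulation adds.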