To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-3.2-testing] x86 hvm: Support task switch when task state segments straddle page
From: "Xen patchbot-3.2-testing" <patchbot-3.2-testing@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 30 May 2008 11:01:54 -0700
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1212162483 -3600
# Node ID 27351129428a63b0b4ed6e96255af18806b3d9f9
# Parent  89193df0ec1ca2b94bef17152db0c3e9da15f8d6
x86 hvm: Support task switch when task state segments straddle page
boundaries. Also improve error diagnostics from hvm_map().

Fixes multi-processor shutdown for some types of Windows OS.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   17758:9c14ba60616d63ff849067dda9958c798f3f838b
xen-unstable date:        Fri May 30 16:30:40 2008 +0100
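
The key point of the first change: the old hvm_map() returned a pointer into a single mapped guest page, so it had to reject any structure whose guest-virtual range crossed a page boundary, and a 104-byte TSS placed near the end of a page does exactly that. The patch below therefore switches the TSS accesses to hvm_copy_from_guest_virt()/hvm_copy_to_guest_virt(), which copy through the paging hierarchy one page at a time. As a rough, self-contained illustration of that technique (map_guest_page() and the flat guest_mem array are stand-ins invented for this sketch, not Xen's API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

    /* Fake "guest memory": two adjacent pages backing guest-virtual 0..8191. */
    static unsigned char guest_mem[2 * PAGE_SIZE];

    /* Illustrative stand-in for Xen's gva -> gfn -> mfn walk: return a
     * mapping of the single page containing va. */
    static void *map_guest_page(uintptr_t va)
    {
        return &guest_mem[va & PAGE_MASK];
    }

    /* Copy that never lets one access cross a page boundary -- the
     * property the old hvm_map() could not provide for a straddling TSS. */
    static void copy_from_guest(void *dst, uintptr_t va, size_t len)
    {
        while ( len != 0 )
        {
            size_t off = va & ~PAGE_MASK;      /* offset within the page */
            size_t chunk = PAGE_SIZE - off;    /* bytes left in this page */
            if ( chunk > len )
                chunk = len;
            memcpy(dst, (unsigned char *)map_guest_page(va) + off, chunk);
            dst = (unsigned char *)dst + chunk;
            va += chunk;
            len -= chunk;
        }
    }

    int main(void)
    {
        char tss[104];                         /* a 32-bit TSS is 104 bytes */
        memset(guest_mem, 'x', sizeof(guest_mem));
        copy_from_guest(tss, PAGE_SIZE - 50, sizeof(tss)); /* straddles */
        printf("copied %zu bytes across the boundary\n", sizeof(tss));
        return 0;
    }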

x86 hvm: Fix task-switch operation ordering.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   17759:487dc63f95ff433eabb132253c5ff31cc178c00a
xen-unstable date:        Fri May 30 16:44:13 2008 +0100
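
The second change reorders the switch: the outgoing context is committed to the old TSS before the new TSS is read, hvm_set_cr3() runs before any register state is taken from the new TSS, and the new TSS (including the back link written for CALL/INT switches) is written back only after the segment selectors have been loaded. Read as a checklist against the final hunks below:

    1. read the old TSS (hvm_copy_from_guest_virt)
    2. save outgoing EIP/EFLAGS/GPRs and segment selectors into it
    3. write it back to prev_tr.base
    4. read the new TSS from tr.base
    5. switch address space with hvm_set_cr3(tss.cr3)
    6. load GPRs/EFLAGS; set NT and tss.back_link for CALL/INT switches
    7. load the segment selectors
    8. write the updated new TSS back to tr.base
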
---
 xen/arch/x86/hvm/hvm.c |  137 +++++++++++++++++++++++++------------------------
 1 files changed, 70 insertions(+), 67 deletions(-)
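
Descriptor-table entries get different treatment in the hunks that follow: an 8-byte GDT/LDT entry can only straddle a page boundary if the table base itself is misaligned, so hvm_map_entry() keeps the single-page mapping and crashes the domain with a diagnostic when the check fires, rather than injecting a page fault as hvm_map() did. A minimal sketch of just that check, using the same PAGE_MASK convention as the Xen source (standalone code, not Xen's):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

    /* True if the 8-byte descriptor at guest-virtual va crosses a page
     * boundary; mirrors the test at the top of hvm_map_entry(). */
    static bool entry_straddles_page(uintptr_t va)
    {
        return ((va & ~PAGE_MASK) + 8) > PAGE_SIZE;
    }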

diff -r 89193df0ec1c -r 27351129428a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri May 30 16:36:05 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri May 30 16:48:03 2008 +0100
@@ -927,16 +927,17 @@ int hvm_virtual_to_linear_addr(
     return 0;
 }
 
-static void *hvm_map(unsigned long va, int size)
+static void *hvm_map_entry(unsigned long va)
 {
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     uint32_t pfec;
 
-    if ( ((va & ~PAGE_MASK) + size) > PAGE_SIZE )
-    {
-        hvm_inject_exception(TRAP_page_fault, PFEC_write_access,
-                             (va + PAGE_SIZE - 1) & PAGE_MASK);
+    if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
+    {
+        gdprintk(XENLOG_ERR, "Descriptor table entry "
+                 "straddles page boundary\n");
+        domain_crash(current->domain);
         return NULL;
     }
 
@@ -948,7 +949,8 @@ static void *hvm_map(unsigned long va, i
     mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
     if ( !p2m_is_ram(p2mt) )
     {
-        hvm_inject_exception(TRAP_page_fault, pfec, va);
+        gdprintk(XENLOG_ERR, "Failed to look up descriptor table entry\n");
+        domain_crash(current->domain);
         return NULL;
     }
 
@@ -959,7 +961,7 @@ static void *hvm_map(unsigned long va, i
     return (char *)map_domain_page(mfn) + (va & ~PAGE_MASK);
 }
 
-static void hvm_unmap(void *p)
+static void hvm_unmap_entry(void *p)
 {
     if ( p )
         unmap_domain_page(p);
@@ -995,7 +997,7 @@ static int hvm_load_segment_selector(
     if ( ((sel & 0xfff8) + 7) > desctab.limit )
         goto fail;
 
-    pdesc = hvm_map(desctab.base + (sel & 0xfff8), 8);
+    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8));
     if ( pdesc == NULL )
         goto hvm_map_fail;
 
@@ -1055,7 +1057,7 @@ static int hvm_load_segment_selector(
     desc.b |= 0x100;
 
  skip_accessed_flag:
-    hvm_unmap(pdesc);
+    hvm_unmap_entry(pdesc);
 
     segr.base = (((desc.b <<  0) & 0xff000000u) |
                  ((desc.b << 16) & 0x00ff0000u) |
@@ -1071,7 +1073,7 @@ static int hvm_load_segment_selector(
     return 0;
 
  unmap_and_fail:
-    hvm_unmap(pdesc);
+    hvm_unmap_entry(pdesc);
  fail:
     hvm_inject_exception(fault_type, sel & 0xfffc, 0);
  hvm_map_fail:
@@ -1087,7 +1089,7 @@ void hvm_task_switch(
     struct segment_register gdt, tr, prev_tr, segr;
     struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
     unsigned long eflags;
-    int exn_raised;
+    int exn_raised, rc;
     struct {
         u16 back_link,__blh;
         u32 esp0;
@@ -1099,7 +1101,7 @@ void hvm_task_switch(
         u32 cr3, eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;
         u16 es, _3, cs, _4, ss, _5, ds, _6, fs, _7, gs, _8, ldt, _9;
         u16 trace, iomap;
-    } *ptss, tss;
+    } tss = { 0 };
 
     hvm_get_segment_register(v, x86_seg_gdtr, &gdt);
     hvm_get_segment_register(v, x86_seg_tr, &prev_tr);
@@ -1112,11 +1114,11 @@ void hvm_task_switch(
         goto out;
     }
 
-    optss_desc = hvm_map(gdt.base + (prev_tr.sel & 0xfff8), 8);
+    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8));
     if ( optss_desc == NULL )
         goto out;
 
-    nptss_desc = hvm_map(gdt.base + (tss_sel & 0xfff8), 8);
+    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8));
     if ( nptss_desc == NULL )
         goto out;
 
@@ -1151,84 +1153,85 @@ void hvm_task_switch(
         goto out;
     }
 
-    ptss = hvm_map(prev_tr.base, sizeof(tss));
-    if ( ptss == NULL )
+    rc = hvm_copy_from_guest_virt(&tss, prev_tr.base, sizeof(tss));
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
         goto out;
 
     eflags = regs->eflags;
     if ( taskswitch_reason == TSW_iret )
         eflags &= ~X86_EFLAGS_NT;
 
-    ptss->cr3    = v->arch.hvm_vcpu.guest_cr[3];
-    ptss->eip    = regs->eip;
-    ptss->eflags = eflags;
-    ptss->eax    = regs->eax;
-    ptss->ecx    = regs->ecx;
-    ptss->edx    = regs->edx;
-    ptss->ebx    = regs->ebx;
-    ptss->esp    = regs->esp;
-    ptss->ebp    = regs->ebp;
-    ptss->esi    = regs->esi;
-    ptss->edi    = regs->edi;
+    tss.cr3    = v->arch.hvm_vcpu.guest_cr[3];
+    tss.eip    = regs->eip;
+    tss.eflags = eflags;
+    tss.eax    = regs->eax;
+    tss.ecx    = regs->ecx;
+    tss.edx    = regs->edx;
+    tss.ebx    = regs->ebx;
+    tss.esp    = regs->esp;
+    tss.ebp    = regs->ebp;
+    tss.esi    = regs->esi;
+    tss.edi    = regs->edi;
 
     hvm_get_segment_register(v, x86_seg_es, &segr);
-    ptss->es = segr.sel;
+    tss.es = segr.sel;
     hvm_get_segment_register(v, x86_seg_cs, &segr);
-    ptss->cs = segr.sel;
+    tss.cs = segr.sel;
     hvm_get_segment_register(v, x86_seg_ss, &segr);
-    ptss->ss = segr.sel;
+    tss.ss = segr.sel;
     hvm_get_segment_register(v, x86_seg_ds, &segr);
-    ptss->ds = segr.sel;
+    tss.ds = segr.sel;
     hvm_get_segment_register(v, x86_seg_fs, &segr);
-    ptss->fs = segr.sel;
+    tss.fs = segr.sel;
     hvm_get_segment_register(v, x86_seg_gs, &segr);
-    ptss->gs = segr.sel;
+    tss.gs = segr.sel;
     hvm_get_segment_register(v, x86_seg_ldtr, &segr);
-    ptss->ldt = segr.sel;
-
-    hvm_unmap(ptss);
-
-    ptss = hvm_map(tr.base, sizeof(tss));
-    if ( ptss == NULL )
+    tss.ldt = segr.sel;
+
+    rc = hvm_copy_to_guest_virt(prev_tr.base, &tss, sizeof(tss));
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
         goto out;
 
-    if ( !hvm_set_cr3(ptss->cr3) )
-    {
-        hvm_unmap(ptss);
+    rc = hvm_copy_from_guest_virt(&tss, tr.base, sizeof(tss));
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
         goto out;
-    }
-
-    regs->eip    = ptss->eip;
-    regs->eflags = ptss->eflags | 2;
-    regs->eax    = ptss->eax;
-    regs->ecx    = ptss->ecx;
-    regs->edx    = ptss->edx;
-    regs->ebx    = ptss->ebx;
-    regs->esp    = ptss->esp;
-    regs->ebp    = ptss->ebp;
-    regs->esi    = ptss->esi;
-    regs->edi    = ptss->edi;
+
+    if ( !hvm_set_cr3(tss.cr3) )
+        goto out;
+
+    regs->eip    = tss.eip;
+    regs->eflags = tss.eflags | 2;
+    regs->eax    = tss.eax;
+    regs->ecx    = tss.ecx;
+    regs->edx    = tss.edx;
+    regs->ebx    = tss.ebx;
+    regs->esp    = tss.esp;
+    regs->ebp    = tss.ebp;
+    regs->esi    = tss.esi;
+    regs->edi    = tss.edi;
 
     if ( (taskswitch_reason == TSW_call_or_int) )
     {
         regs->eflags |= X86_EFLAGS_NT;
-        ptss->back_link = prev_tr.sel;
+        tss.back_link = prev_tr.sel;
     }
 
     exn_raised = 0;
-    if ( hvm_load_segment_selector(v, x86_seg_es, ptss->es) ||
-         hvm_load_segment_selector(v, x86_seg_cs, ptss->cs) ||
-         hvm_load_segment_selector(v, x86_seg_ss, ptss->ss) ||
-         hvm_load_segment_selector(v, x86_seg_ds, ptss->ds) ||
-         hvm_load_segment_selector(v, x86_seg_fs, ptss->fs) ||
-         hvm_load_segment_selector(v, x86_seg_gs, ptss->gs) ||
-         hvm_load_segment_selector(v, x86_seg_ldtr, ptss->ldt) )
+    if ( hvm_load_segment_selector(v, x86_seg_es, tss.es) ||
+         hvm_load_segment_selector(v, x86_seg_cs, tss.cs) ||
+         hvm_load_segment_selector(v, x86_seg_ss, tss.ss) ||
+         hvm_load_segment_selector(v, x86_seg_ds, tss.ds) ||
+         hvm_load_segment_selector(v, x86_seg_fs, tss.fs) ||
+         hvm_load_segment_selector(v, x86_seg_gs, tss.gs) ||
+         hvm_load_segment_selector(v, x86_seg_ldtr, tss.ldt) )
         exn_raised = 1;
 
-    if ( (ptss->trace & 1) && !exn_raised )
+    rc = hvm_copy_to_guest_virt(tr.base, &tss, sizeof(tss));
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        exn_raised = 1;
+
+    if ( (tss.trace & 1) && !exn_raised )
         hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
-
-    hvm_unmap(ptss);
 
     tr.attr.fields.type = 0xb; /* busy 32-bit tss */
     hvm_set_segment_register(v, x86_seg_tr, &tr);
@@ -1257,8 +1260,8 @@ void hvm_task_switch(
     }
 
  out:
-    hvm_unmap(optss_desc);
-    hvm_unmap(nptss_desc);
+    hvm_unmap_entry(optss_desc);
+    hvm_unmap_entry(nptss_desc);
 }
 
 /*
