[Xen-ia64-devel] [PATCH] add support for hvm live migration

To: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] [PATCH] add support for hvm live migration
From: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
Date: Tue, 12 Feb 2008 17:54:39 +0900

Hi,

This is a naive implementation of log-dirty mode for HVM domains.
(I gave up on writing the dirty-bit fault handler in assembler.)

I succeeded in live-migrating an HVM domain while it was building a
kernel. An HVM domain with PV drivers cannot be migrated yet.

Thanks,
Kouya

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
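
For readers unfamiliar with log-dirty mode: during live migration the
toolstack repeatedly copies only the pages the guest has dirtied since
the previous pass, using the bitmap this patch maintains. Below is a
minimal sketch of that pre-copy loop; enable_log_dirty,
read_and_clear_dirty_bitmap and send_page are hypothetical stand-ins
for the real toolstack interfaces, not actual libxc calls.

    /* Illustrative pre-copy loop driven by the log-dirty bitmap.
     * enable_log_dirty(), read_and_clear_dirty_bitmap() and
     * send_page() are hypothetical stand-ins, not libxc calls.
     */
    #define BITS_PER_LONG   (8 * sizeof(unsigned long))
    #define MAX_ITERS       29
    #define DIRTY_THRESHOLD 50  /* stop iterating below this many pages */

    extern void enable_log_dirty(int domid);
    extern long read_and_clear_dirty_bitmap(int domid, unsigned long *bm,
                                            unsigned long nr_pages);
    extern void send_page(int domid, unsigned long gpfn);

    static void precopy(int domid, unsigned long *bm, unsigned long nr_pages)
    {
        unsigned long gpfn;
        int iter;

        enable_log_dirty(domid);  /* first write to each page now faults */

        for (iter = 0; iter < MAX_ITERS; iter++) {
            /* Fetch the bitmap and reset it for the next round. */
            long dirty = read_and_clear_dirty_bitmap(domid, bm, nr_pages);

            for (gpfn = 0; gpfn < nr_pages; gpfn++)
                if (bm[gpfn / BITS_PER_LONG] & (1UL << (gpfn % BITS_PER_LONG)))
                    send_page(domid, gpfn);

            if (dirty < DIRTY_THRESHOLD)
                break;  /* few pages left: pause the guest, copy the rest */
        }
    }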

diff -r 9203ee23e724 xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c     Thu Feb 07 11:08:49 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_fault.c     Tue Feb 12 16:21:34 2008 +0900
@@ -52,6 +52,7 @@
 #include <asm/vmx_phy_mode.h>
 #include <xen/mm.h>
 #include <asm/vmx_pal.h>
+#include <asm/shadow.h>
 /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
 #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
 
@@ -520,3 +521,45 @@ try_again:
     itlb_fault(v, vadr);
     return IA64_FAULT;
 }
+
+void
+vmx_ia64_shadow_fault(unsigned long ifa, unsigned long itir,
+                      unsigned long mpa, struct pt_regs *regs)
+{
+    struct vcpu *v = current;
+    struct domain *d = v->domain;
+    unsigned long gpfn;
+    unsigned long pte;
+    thash_data_t *data;
+
+    gpfn = get_gpfn_from_mfn(mpa >> PAGE_SHIFT);
+    data = vhpt_lookup(ifa);
+    if (data) {
+        pte = data->page_flags;
+        // BUG_ON((pte ^ mpa) & (_PAGE_PPN_MASK & PAGE_MASK));
+        if (!(pte & _PAGE_VIRT_D))
+            goto inject_dirty_bit;
+        data->page_flags = pte | _PAGE_D;
+    } else {
+        data = vtlb_lookup(v, ifa, DSIDE_TLB);
+        if (data) {
+            pte = data->page_flags;
+            if (!(pte & _PAGE_VIRT_D))
+                goto inject_dirty_bit;
+        }
+    }
+
+    /* Set the dirty bit in the bitmap.  */
+    shadow_mark_page_dirty(d, gpfn);
+
+    /* Retry */
+    atomic64_inc(&d->arch.shadow_fault_count);
+    ia64_ptcl(ifa, PAGE_SHIFT << 2);
+    return;
+
+inject_dirty_bit:
+    /* Reflect. no need to purge.  */
+    set_ifa_itir_iha (v, ifa, 1, 1, 1);
+    inject_guest_interruption(v, IA64_DIRTY_BIT_VECTOR);
+    return;
+}
diff -r 9203ee23e724 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Thu Feb 07 11:08:49 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Tue Feb 12 16:37:32 2008 +0900
@@ -433,8 +433,27 @@ END(vmx_dkey_miss)
 
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
 ENTRY(vmx_dirty_bit)
-    VMX_DBG_FAULT(8)
-    VMX_REFLECT(8)
+    mov r29=cr.ipsr
+    mov r31=pr
+    ;;
+    tbit.z p6,p0=r29,IA64_PSR_VM_BIT
+    mov r19=8
+(p6)br.spnt.many dispatch_to_fault_handler
+    mov r19=cr.ifa
+    movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
+    ;;
+    tpa r19=r19
+    // If shadow mode is not enabled, reflect the fault.
+    ld8 r22=[r22]
+    ;;
+    add r22=IA64_VCPU_SHADOW_BITMAP_OFFSET,r22
+    ;;
+    ld8 r22=[r22]
+    ;;
+    cmp.eq p6,p0=r0,r22 // !shadow_bitmap ?
+(p6)br.dptk.many vmx_dispatch_reflection
+    br.sptk vmx_dispatch_shadow_fault
+    VMX_FAULT(8);
 END(vmx_dirty_bit)
 
     .org vmx_ia64_ivt+0x2400
@@ -1332,6 +1351,30 @@ ENTRY(vmx_dispatch_interrupt)
     br.call.sptk.many b6=ia64_handle_irq
 END(vmx_dispatch_interrupt)
 
+
+ENTRY(vmx_dispatch_shadow_fault)
+    VMX_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,4,0
+    mov out0=cr.ifa
+    mov out1=cr.itir
+    mov out2=r15
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    (p15) ssm psr.i               // restore psr.i
+    movl r14=ia64_leave_hypervisor
+    ;;
+    VMX_SAVE_REST
+    mov rp=r14
+    ;;
+    P6_BR_CALL_PANIC(.Lvmx_dispatch_shadow_fault_string)
+    adds out3=16,r12
+    br.call.sptk.many b6=vmx_ia64_shadow_fault
+END(vmx_dispatch_shadow_fault)
+
 .Lvmx_dispatch_reflection_string:
     .asciz "vmx_dispatch_reflection\n"
 .Lvmx_dispatch_virtualization_fault_string:
@@ -1340,3 +1383,5 @@ END(vmx_dispatch_interrupt)
     .asciz "vmx_dispatch_vexirq\n"
 .Lvmx_dispatch_tlb_miss_string:
     .asciz "vmx_dispatch_tlb_miss\n"
+.Lvmx_dispatch_shadow_fault_string:
+    .asciz "vmx_dispatch_shadow_fault\n"
diff -r 9203ee23e724 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Thu Feb 07 11:08:49 2008 -0700
+++ b/xen/arch/ia64/vmx/vtlb.c  Tue Feb 12 16:23:13 2008 +0900
@@ -22,6 +22,7 @@
 
 #include <asm/vmx_vcpu.h>
 #include <asm/vmx_phy_mode.h>
+#include <asm/shadow.h>
 
 static thash_data_t *__alloc_chain(thash_cb_t *);
 
@@ -132,7 +133,7 @@ static void vmx_vhpt_insert(thash_cb_t *
     ia64_rr rr;
     thash_data_t *head, *cch;
 
-    pte = pte & ~PAGE_FLAGS_RV_MASK;
+    pte &= ((~PAGE_FLAGS_RV_MASK)|_PAGE_VIRT_D);
     rr.rrval = ia64_get_rr(ifa);
     head = (thash_data_t *)ia64_thash(ifa);
     tag = ia64_ttag(ifa);
@@ -514,13 +515,14 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
     u64 ps, ps_mask, paddr, maddr;
 //    ia64_rr rr;
     union pte_flags phy_pte;
+    struct domain *d = v->domain;
 
     ps = itir_ps(itir);
     ps_mask = ~((1UL << ps) - 1);
     phy_pte.val = *pte;
     paddr = *pte;
     paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-    maddr = lookup_domain_mpa(v->domain, paddr, NULL);
+    maddr = lookup_domain_mpa(d, paddr, NULL);
     if (maddr & GPFN_IO_MASK) {
         *pte |= VTLB_PTE_IO;
         return -1;
@@ -536,6 +538,18 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
 //    ps = rr.ps;
     maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
     phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
+
+    /* If shadow mode is enabled, virtualize dirty bit.  */
+    if (shadow_mode_enabled(d) && phy_pte.d) {
+        u64 gpfn = paddr >> PAGE_SHIFT;
+        phy_pte.val |= _PAGE_VIRT_D;
+
+        /* If the page is not already dirty, don't set the dirty bit! */
+        if (gpfn < d->arch.shadow_bitmap_size * 8
+            && !test_bit(gpfn, d->arch.shadow_bitmap))
+            phy_pte.d = 0;
+    }
+
     return phy_pte.val;
 }
 
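
The heart of the patch is the virtualized dirty bit: translate_phy_pte()
above records the guest's D bit in the software _PAGE_VIRT_D bit and
clears the hardware D bit for pages not yet logged, so the first write
to a clean page takes the dirty-bit fault and reaches
vmx_ia64_shadow_fault(). Here is a self-contained model of that
decision logic; names and bit positions are simplified stand-ins, not
the Xen definitions.

    /* Toy model of the virtualized dirty bit.  PTE_D stands for the
     * hardware dirty bit; PTE_VIRT_D remembers that the guest's own
     * PTE had D set.  Both are simplified, not the Xen definitions.
     */
    #include <stdbool.h>

    #define PTE_D       (1UL << 6)
    #define PTE_VIRT_D  (1UL << 55)  /* a software-available PTE bit */

    /* Installing a translation while log-dirty mode is active. */
    unsigned long shadow_pte(unsigned long guest_pte, bool already_dirty)
    {
        unsigned long pte = guest_pte;

        if (pte & PTE_D) {
            pte |= PTE_VIRT_D;     /* remember: guest PTE has D set */
            if (!already_dirty)
                pte &= ~PTE_D;     /* force a fault on the first write */
        }
        return pte;
    }

    /* The dirty-bit fault handler's decision: true means the fault is
     * ours (mark the page in the bitmap, restore D, retry); false
     * means the guest PTE is really clean, so reflect the fault.
     */
    bool handle_dirty_bit_fault(unsigned long *pte)
    {
        if (!(*pte & PTE_VIRT_D))
            return false;          /* reflect to the guest */

        *pte |= PTE_D;             /* restore hardware D, then retry */
        return true;
    }
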
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel