Sorry again.
I have now succeeded in Windows Server 2003 live migration with the attached patch.
-- Kouya
Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
Alex Williamson writes:
>
> On Wed, 2008-02-13 at 15:04 +0900, Kouya Shimura wrote:
> > Hi Alex,
> >
> > I'm so sorry. The previous patch causes a "Guest nested fault"
> > when an HVM domain is created.
> > Please replace it with an attached patch.
>
> Hi Kouya,
>
> Seems like there's still some kind of issue along those lines. I can't
> boot Windows Server 2003 with this patch. It seems to sit indefinitely
> on the first graphical boot screen. The scroller keeps going, but there
> appears to be no forward progress. Thanks,
>
> Alex
>
> --
> Alex Williamson HP Open Source & Linux Org.
diff -r 9203ee23e724 xen/arch/ia64/vmx/vmx_fault.c
--- a/xen/arch/ia64/vmx/vmx_fault.c Thu Feb 07 11:08:49 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_fault.c Thu Feb 14 12:11:02 2008 +0900
@@ -52,6 +52,7 @@
#include <asm/vmx_phy_mode.h>
#include <xen/mm.h>
#include <asm/vmx_pal.h>
+#include <asm/shadow.h>
/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
@@ -520,3 +521,47 @@ try_again:
itlb_fault(v, vadr);
return IA64_FAULT;
}
+
+void
+vmx_ia64_shadow_fault(u64 ifa, u64 isr, u64 mpa, REGS *regs)
+{
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
+ u64 gpfn, pte;
+ thash_data_t *data;
+
+ if (!shadow_mode_enabled(d))
+ goto inject_dirty_bit;
+
+ gpfn = get_gpfn_from_mfn(mpa >> PAGE_SHIFT);
+ data = vhpt_lookup(ifa);
+ if (data) {
+ pte = data->page_flags;
+ // BUG_ON((pte ^ mpa) & (_PAGE_PPN_MASK & PAGE_MASK));
+ if (!(pte & _PAGE_VIRT_D))
+ goto inject_dirty_bit;
+ data->page_flags = pte | _PAGE_D;
+ } else {
+ data = vtlb_lookup(v, ifa, DSIDE_TLB);
+ if (data) {
+ if (!(data->page_flags & _PAGE_VIRT_D))
+ goto inject_dirty_bit;
+ }
+ pte = 0;
+ }
+
+ /* Set the dirty bit in the bitmap. */
+ shadow_mark_page_dirty(d, gpfn);
+
+ /* Retry */
+ atomic64_inc(&d->arch.shadow_fault_count);
+ ia64_ptcl(ifa, PAGE_SHIFT << 2);
+ return;
+
+inject_dirty_bit:
+ /* Reflect. no need to purge. */
+ VCPU(v, isr) = isr;
+ set_ifa_itir_iha (v, ifa, 1, 1, 1);
+ inject_guest_interruption(v, IA64_DIRTY_BIT_VECTOR);
+ return;
+}
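
A note on how this works: translate_phy_pte() (see the vtlb.c hunk
below) records the guest PTE's D bit in the software _PAGE_VIRT_D flag
and clears the hardware D bit for pages not yet logged, so the first
store to such a page raises the dirty-bit fault handled above. Roughly,
in standalone C (a sketch only; PAGE_D, PAGE_VIRT_D and the open-coded
bitmap are illustrative stand-ins for the real Xen definitions):

    #include <stdint.h>

    #define PAGE_D       (1UL << 6)   /* hardware dirty bit (stand-in value) */
    #define PAGE_VIRT_D  (1UL << 62)  /* "guest PTE had D" software bit (stand-in) */

    enum fault_action { RETRY, REFLECT_TO_GUEST };

    /* Set bit gpfn in the log-dirty bitmap (non-atomic sketch; the
     * patch uses shadow_mark_page_dirty() for this). */
    static void mark_dirty(unsigned long *bitmap, uint64_t gpfn)
    {
        bitmap[gpfn / (8 * sizeof(unsigned long))] |=
            1UL << (gpfn % (8 * sizeof(unsigned long)));
    }

    /* Dirty-bit fault with log-dirty mode active: if the guest PTE
     * really had D set (recorded in PAGE_VIRT_D), log the page, set
     * the hardware D bit so the fault does not recur, and retry.
     * Otherwise the fault belongs to the guest. */
    static enum fault_action shadow_dirty_fault(unsigned long *bitmap,
                                                uint64_t *pte, uint64_t gpfn)
    {
        if (!(*pte & PAGE_VIRT_D))
            return REFLECT_TO_GUEST;
        *pte |= PAGE_D;
        mark_dirty(bitmap, gpfn);
        return RETRY;   /* the real handler purges the TLB via ia64_ptcl() */
    }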
diff -r 9203ee23e724 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S Thu Feb 07 11:08:49 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_ivt.S Thu Feb 14 12:00:18 2008 +0900
@@ -433,8 +433,16 @@ END(vmx_dkey_miss)
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(vmx_dirty_bit)
- VMX_DBG_FAULT(8)
- VMX_REFLECT(8)
+ mov r29=cr.ipsr
+ mov r31=pr
+ ;;
+ mov r19=cr.ifa
+ tbit.z p6,p0=r29,IA64_PSR_VM_BIT
+(p6)br.spnt.many vmx_fault_8
+ ;;
+ tpa r19=r19
+ br.sptk vmx_dispatch_shadow_fault
+ VMX_FAULT(8)
END(vmx_dirty_bit)
.org vmx_ia64_ivt+0x2400
@@ -1332,6 +1340,30 @@ ENTRY(vmx_dispatch_interrupt)
br.call.sptk.many b6=ia64_handle_irq
END(vmx_dispatch_interrupt)
+
+ENTRY(vmx_dispatch_shadow_fault)
+ VMX_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,4,0
+ mov out0=cr.ifa
+ mov out1=cr.isr
+ mov out2=r15
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ (p15) ssm psr.i // restore psr.i
+ movl r14=ia64_leave_hypervisor
+ ;;
+ VMX_SAVE_REST
+ mov rp=r14
+ ;;
+ P6_BR_CALL_PANIC(.Lvmx_dispatch_shadow_fault_string)
+ adds out3=16,r12
+ br.call.sptk.many b6=vmx_ia64_shadow_fault
+END(vmx_dispatch_shadow_fault)
+
.Lvmx_dispatch_reflection_string:
.asciz "vmx_dispatch_reflection\n"
.Lvmx_dispatch_virtualization_fault_string:
@@ -1340,3 +1372,5 @@ END(vmx_dispatch_interrupt)
.asciz "vmx_dispatch_vexirq\n"
.Lvmx_dispatch_tlb_miss_string:
.asciz "vmx_dispatch_tlb_miss\n"
+.Lvmx_dispatch_shadow_fault_string:
+ .asciz "vmx_dispatch_shadow_fault\n"
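
For reference, the argument marshalling done by
vmx_dispatch_shadow_fault, matched against the C prototype in
vmx_fault.c (this assumes, as I read VMX_SAVE_MIN_WITH_COVER_R19, that
the tpa result passed in via r19 ends up in r15):

    /*
     * register         C parameter    meaning
     * out0 = cr.ifa    u64 ifa        faulting guest virtual address
     * out1 = cr.isr    u64 isr        interruption status register
     * out2 = r15       u64 mpa        tpa(ifa), the machine physical address
     * out3 = r12+16    REGS *regs     saved register frame on the stack
     */
    void vmx_ia64_shadow_fault(u64 ifa, u64 isr, u64 mpa, REGS *regs);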
diff -r 9203ee23e724 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c Thu Feb 07 11:08:49 2008 -0700
+++ b/xen/arch/ia64/vmx/vtlb.c Thu Feb 14 11:59:49 2008 +0900
@@ -22,6 +22,7 @@
#include <asm/vmx_vcpu.h>
#include <asm/vmx_phy_mode.h>
+#include <asm/shadow.h>
static thash_data_t *__alloc_chain(thash_cb_t *);
@@ -132,7 +133,7 @@ static void vmx_vhpt_insert(thash_cb_t *
ia64_rr rr;
thash_data_t *head, *cch;
- pte = pte & ~PAGE_FLAGS_RV_MASK;
+ pte &= ((~PAGE_FLAGS_RV_MASK)|_PAGE_VIRT_D);
rr.rrval = ia64_get_rr(ifa);
head = (thash_data_t *)ia64_thash(ifa);
tag = ia64_ttag(ifa);
@@ -514,13 +515,14 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
u64 ps, ps_mask, paddr, maddr;
// ia64_rr rr;
union pte_flags phy_pte;
+ struct domain *d = v->domain;
ps = itir_ps(itir);
ps_mask = ~((1UL << ps) - 1);
phy_pte.val = *pte;
paddr = *pte;
paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
- maddr = lookup_domain_mpa(v->domain, paddr, NULL);
+ maddr = lookup_domain_mpa(d, paddr, NULL);
if (maddr & GPFN_IO_MASK) {
*pte |= VTLB_PTE_IO;
return -1;
@@ -536,6 +538,18 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
// ps = rr.ps;
maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
+
+ /* If shadow mode is enabled, virtualize dirty bit. */
+ if (shadow_mode_enabled(d) && phy_pte.d) {
+ u64 gpfn = paddr >> PAGE_SHIFT;
+ phy_pte.val |= _PAGE_VIRT_D;
+
+ /* If the page is not already dirty, don't set the dirty bit! */
+ if (gpfn < d->arch.shadow_bitmap_size * 8
+ && !test_bit(gpfn, d->arch.shadow_bitmap))
+ phy_pte.d = 0;
+ }
+
return phy_pte.val;
}
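
One subtlety in the test above: shadow_bitmap_size is in bytes, so the
tracked gpfn range is shadow_bitmap_size * 8 bits. A standalone
restatement of the condition (hypothetical helper, not in the patch):

    #include <stdint.h>

    /* Should translate_phy_pte() keep the hardware D bit?  Pages
     * outside the bitmap are not tracked and keep D; tracked pages
     * keep D only once they have already been logged as dirty. */
    static int keep_hw_dirty(const unsigned long *bitmap,
                             uint64_t bitmap_bytes, uint64_t gpfn)
    {
        if (gpfn >= bitmap_bytes * 8)
            return 1;
        return (bitmap[gpfn / (8 * sizeof(unsigned long))]
                >> (gpfn % (8 * sizeof(unsigned long)))) & 1;
    }

Clearing phy_pte.d for a tracked, not-yet-logged page is what forces
the first store to take the dirty-bit fault; after that the page stays
dirty for the rest of the log-dirty round, so each page should fault at
most once per round.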