diff -BbuNr xen-unstable.hg-orig/xen/arch/x86/hvm/hvm.c xen-unstable.hg-handle_ro/xen/arch/x86/hvm/hvm.c
--- xen-unstable.hg-orig/xen/arch/x86/hvm/hvm.c	2008-07-08 15:52:50.000000000 +0100
+++ xen-unstable.hg-handle_ro/xen/arch/x86/hvm/hvm.c	2008-07-10 11:48:47.074848083 +0100
@@ -1492,6 +1492,18 @@
             return HVMCOPY_bad_gfn_to_mfn;
         ASSERT(mfn_valid(mfn));
 
+        if ( unlikely((flags & HVMCOPY_to_guest) && (p2mt == p2m_ram_ro)) )
+        {
+            static unsigned long lastpage = 0;
+            if ( gfn != lastpage )
+            {
+                lastpage = gfn;
+                gdprintk(XENLOG_DEBUG, "guest attempted write to read-only memory page. "
+                         "page=%#lx, mfn=%#lx\n", (lastpage << PAGE_SHIFT), mfn);
+            }
+            goto nocopy;
+        }
+
         p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
 
         if ( flags & HVMCOPY_to_guest )
@@ -1506,6 +1518,7 @@
 
         unmap_domain_page(p);
 
+    nocopy:
         addr += count;
         buf += count;
         todo -= count;
diff -BbuNr xen-unstable.hg-orig/xen/arch/x86/mm/shadow/multi.c xen-unstable.hg-handle_ro/xen/arch/x86/mm/shadow/multi.c
--- xen-unstable.hg-orig/xen/arch/x86/mm/shadow/multi.c	2008-07-08 15:52:50.000000000 +0100
+++ xen-unstable.hg-handle_ro/xen/arch/x86/mm/shadow/multi.c	2008-07-10 11:20:05.455253583 +0100
@@ -3042,6 +3042,8 @@
     int fast_emul = 0;
 #endif
 
+    emul_ctxt.no_write = 0;
+
     SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u, rip=%lx\n",
                   v->domain->domain_id, v->vcpu_id,
                   va, regs->error_code, regs->rip);
@@ -3344,14 +3346,25 @@
         }
     }
 
-    /* Need to hand off device-model MMIO and writes to read-only
-     * memory to the device model */
-    if ( p2mt == p2m_mmio_dm
-         || (p2mt == p2m_ram_ro && ft == ft_demand_write) )
+    /* Need to hand off device-model MMIO to the device model */
+    if ( p2mt == p2m_mmio_dm )
     {
         gpa = guest_walk_to_gpa(&gw);
         goto mmio;
     }
+    /* Log attempts to write to read-only memory */
+    else if ( p2mt == p2m_ram_ro && ft == ft_demand_write )
+    {
+        static unsigned long lastpage = 0;
+        if ( (va & PAGE_MASK) != lastpage )
+        {
+            lastpage = (va & PAGE_MASK);
+            gdprintk(XENLOG_DEBUG, "guest attempted write to read-only memory page. "
+                     "va page=%#lx, mfn=%#lx\n", (va & PAGE_MASK), mfn_x(gmfn));
+        }
+        emul_ctxt.no_write = 1;
+        goto emulate;
+    }
 
     /* In HVM guests, we force CR0.WP always to be set, so that the
      * pagetables are always write-protected. If the guest thinks
@@ -4776,6 +4789,9 @@
 {
     void *addr;
 
+    if ( sh_ctxt->no_write )
+        return X86EMUL_OKAY;
+
     /* Unaligned writes are only acceptable on HVM */
     if ( (vaddr & (bytes - 1)) && !is_hvm_vcpu(v) )
         return X86EMUL_UNHANDLEABLE;
diff -BbuNr xen-unstable.hg-orig/xen/arch/x86/mm/shadow/private.h xen-unstable.hg-handle_ro/xen/arch/x86/mm/shadow/private.h
--- xen-unstable.hg-orig/xen/arch/x86/mm/shadow/private.h	2008-07-08 15:52:50.000000000 +0100
+++ xen-unstable.hg-handle_ro/xen/arch/x86/mm/shadow/private.h	2008-07-09 18:44:37.234991121 +0100
@@ -751,6 +751,9 @@
     /* MFNs being written to in write/cmpxchg callbacks */
     mfn_t mfn1, mfn2;
 
+    /* Discard writes */
+    unsigned int no_write;
+
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     /* Special case for avoiding having to verify writes: remember
      * whether the old value had its low bit (_PAGE_PRESENT) clear. */
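
For reference, below is a minimal, self-contained sketch (plain user-space C, not Xen code) of the rate-limited logging pattern both the __hvm_copy() and sh_page_fault() hunks rely on: a static lastpage variable records the last offending page so repeated writes to the same read-only page produce one message rather than a flood, while the write itself is simply discarded (the role of no_write = 1 in the patch). The PAGE_SHIFT/PAGE_MASK values and the log_ro_write() helper are assumptions made for the demo, not part of the patch.

/* Illustrative sketch only -- demonstrates the "log once per page,
 * discard the write" pattern used in the patch above. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

static void log_ro_write(unsigned long addr)
{
    /* Remember the last page we complained about (as the patch does
     * with its static 'lastpage' in each hunk). */
    static unsigned long lastpage = 0;

    if ( (addr & PAGE_MASK) != lastpage )
    {
        lastpage = addr & PAGE_MASK;
        printf("guest attempted write to read-only memory page. "
               "page=%#lx\n", lastpage);
    }
    /* The write itself is dropped, mirroring no_write/goto nocopy. */
}

int main(void)
{
    /* Three faults on the same page print one line; a new page prints
     * another. */
    log_ro_write(0x1000);
    log_ro_write(0x1004);
    log_ro_write(0x1ff8);
    log_ro_write(0x2000);
    return 0;
}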