# Patch: propagate the faulting access type (read/write/execute) into the
# nested-HAP L1 p2m walk so the guest pagetable walk uses a correct PFEC
# mask instead of a stale/uninitialized one.
# (Reconstructed into valid unified-diff line structure; the original had
# its newlines stripped, which made it unapplicable. Hunk bodies match the
# @@ -a,b +c,d @@ line counts: 7->8, 13->20, 7->8, 7->8.)
diff -r c69ef56d8afc xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Tue Jul 03 11:44:28 2012 +0200
+++ b/xen/arch/x86/hvm/hvm.c	Fri Jul 13 17:15:55 2012 +0200
@@ -1278,7 +1278,8 @@ int hvm_hap_nested_page_fault(unsigned l
          * into l1 guest if not fixable. The algorithm is
          * the same as for shadow paging.
          */
-        rv = nestedhvm_hap_nested_page_fault(v, gpa);
+        rv = nestedhvm_hap_nested_page_fault(v, gpa,
+                                             access_r, access_w, access_x);
         switch (rv) {
         case NESTEDHVM_PAGEFAULT_DONE:
             return 1;
diff -r c69ef56d8afc xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c	Tue Jul 03 11:44:28 2012 +0200
+++ b/xen/arch/x86/mm/hap/nested_hap.c	Fri Jul 13 17:15:55 2012 +0200
@@ -177,13 +177,20 @@ out:
  */
 static int
 nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order)
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x)
 {
     uint32_t pfec;
     unsigned long nested_cr3, gfn;
 
     nested_cr3 = nhvm_vcpu_hostcr3(v);
 
+    pfec = PFEC_user_mode | PFEC_page_present;
+    if (access_w)
+        pfec |= PFEC_write_access;
+    if (access_x)
+        pfec |= PFEC_insn_fetch;
+
     /* Walk the guest-supplied NPT table, just as if it were a pagetable */
     gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec, page_order);
 
@@ -200,7 +207,8 @@ nestedhap_walk_L1_p2m(struct vcpu *v, pa
  * Returns:
  */
 int
-nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa)
+nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa,
+                                bool_t access_r, bool_t access_w, bool_t access_x)
 {
     int rv;
     paddr_t L1_gpa, L0_gpa;
@@ -212,7 +220,8 @@ nestedhvm_hap_nested_page_fault(struct v
     nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, &page_order_21);
+    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, &page_order_21,
+                               access_r, access_w, access_x);
 
     /* let caller to handle these two cases */
     switch (rv) {