[Xen-changelog] [xen-unstable] x86/mm: use new page-order interfaces in nested HAP code

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/mm: use new page-order interfaces in nested HAP code
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Thu, 08 Sep 2011 18:44:10 +0100
Delivery-date: Thu, 08 Sep 2011 10:45:41 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1315491186 -3600
# Node ID 0312575dc35e4294eb50e365b2c10078914daca8
# Parent  ac33d68e89767d49113824e5661c49a5465a18e7
x86/mm: use new page-order interfaces in nested HAP code
to make 2M and 1G mappings in the nested p2m tables.

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Signed-off-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
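
The heart of the change is in nestedhap_fix_p2m(): before the nested p2m entry is written, both the guest frame number and the machine frame number are rounded down to the start of the superpage given by page_order. A minimal standalone C sketch of that arithmetic follows; the PAGE_SHIFT definition, the sample addresses and the printf are illustrative only, while the real code operates on Xen's mfn_t via maddr_to_page()/page_to_mfn() and calls set_p2m_entry().

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* 4K base pages, as on x86 (illustrative definition) */

int main(void)
{
    /* A 2M superpage on x86 has page_order 9 (512 contiguous 4K pages). */
    unsigned int page_order = 9;
    uint64_t L2_gpa = 0x40212345ULL;  /* illustrative L2 guest-physical address */
    uint64_t L0_gpa = 0x80234567ULL;  /* illustrative host-physical address */

    /* Same mask construction as the patch: clear the low page_order bits of
     * each frame number so both point at the start of their superpage. */
    unsigned long mask = ~((1UL << page_order) - 1);
    unsigned long gfn = (L2_gpa >> PAGE_SHIFT) & mask;
    unsigned long mfn = (L0_gpa >> PAGE_SHIFT) & mask;

    printf("gfn = %#lx, mfn = %#lx, order = %u\n", gfn, mfn, page_order);
    return 0;
}

With page_order 9 (a 2M superpage), the low nine bits of both frame numbers are cleared, so a single nested p2m entry can cover the whole 2M range.
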


diff -r ac33d68e8976 -r 0312575dc35e xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c  Thu Sep 08 15:13:06 2011 +0100
+++ b/xen/arch/x86/mm/hap/nested_hap.c  Thu Sep 08 15:13:06 2011 +0100
@@ -99,7 +99,7 @@
 static void
 nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m, 
                   paddr_t L2_gpa, paddr_t L0_gpa,
-                  p2m_type_t p2mt, p2m_access_t p2ma)
+                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     int rv = 1;
     ASSERT(p2m);
@@ -111,9 +111,20 @@
      * leave it alone.  We'll pick up the right one as we try to 
      * vmenter the guest. */
     if ( p2m->cr3 == nhvm_vcpu_hostcr3(v) )
-         rv = set_p2m_entry(p2m, L2_gpa >> PAGE_SHIFT,
-                            page_to_mfn(maddr_to_page(L0_gpa)),
-                            0 /*4K*/, p2mt, p2ma);
+    {
+        unsigned long gfn, mask;
+        mfn_t mfn;
+
+        /* If this is a superpage mapping, round down both addresses
+         * to the start of the superpage. */
+        mask = ~((1UL << page_order) - 1);
+
+        gfn = (L2_gpa >> PAGE_SHIFT) & mask;
+        mfn = _mfn((L0_gpa >> PAGE_SHIFT) & mask);
+
+        rv = set_p2m_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+    }
+
     p2m_unlock(p2m);
 
     if (rv == 0) {
@@ -129,7 +140,8 @@
  * value tells the upper level what to do.
  */
 static int
-nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa)
+nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
+                      unsigned int *page_order)
 {
     mfn_t mfn;
     p2m_type_t p2mt;
@@ -137,7 +149,7 @@
 
     /* walk L0 P2M table */
     mfn = gfn_to_mfn_type_p2m(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma, 
-                              p2m_query, NULL);
+                              p2m_query, page_order);
 
     if ( p2m_is_paging(p2mt) || p2m_is_shared(p2mt) || !p2m_is_ram(p2mt) )
         return NESTEDHVM_PAGEFAULT_ERROR;
@@ -154,7 +166,8 @@
  * L1_gpa. The result value tells what to do next.
  */
 static int
-nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa)
+nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                      unsigned int *page_order)
 {
     uint32_t pfec;
     unsigned long nested_cr3, gfn;
@@ -162,7 +175,7 @@
     nested_cr3 = nhvm_vcpu_hostcr3(v);
 
     /* Walk the guest-supplied NPT table, just as if it were a pagetable */
-    gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec, NULL);
+    gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec, page_order);
 
     if ( gfn == INVALID_GFN ) 
         return NESTEDHVM_PAGEFAULT_INJECT;
@@ -183,12 +196,13 @@
     paddr_t L1_gpa, L0_gpa;
     struct domain *d = v->domain;
     struct p2m_domain *p2m, *nested_p2m;
+    unsigned int page_order_21, page_order_10, page_order_20;
 
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
     nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa);
+    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, &page_order_21);
 
     /* let caller to handle these two cases */
     switch (rv) {
@@ -204,7 +218,7 @@
     }
 
     /* ==> we have to walk L0 P2M */
-    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa);
+    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, &page_order_10);
 
     /* let upper level caller to handle these two cases */
     switch (rv) {
@@ -219,8 +233,10 @@
         break;
     }
 
+    page_order_20 = min(page_order_21, page_order_10);
+
     /* fix p2m_get_pagetable(nested_p2m) */
-    nestedhap_fix_p2m(v, nested_p2m, L2_gpa, L0_gpa,
+    nestedhap_fix_p2m(v, nested_p2m, L2_gpa, L0_gpa, page_order_20,
         p2m_ram_rw,
         p2m_access_rwx /* FIXME: Should use same permission as l1 guest */);
 

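The other part of the change is the choice of order for the combined L2->L0 mapping: the fault handler takes the minimum of the orders returned by the L2->L1 and L1->L0 walks, since the nested mapping can be no larger than the smaller of the two translations it is built from. A small self-contained sketch with illustrative values (on x86, order 0 = 4K, 9 = 2M, 18 = 1G):

#include <stdio.h>

int main(void)
{
    /* Page orders on x86: 0 = 4K, 9 = 2M, 18 = 1G.  Values are illustrative. */
    unsigned int page_order_21 = 18;  /* L2->L1 translation uses a 1G page */
    unsigned int page_order_10 = 9;   /* L1->L0 translation uses a 2M page */

    /* As in the patch: the combined L2->L0 mapping can only be as large as
     * the smaller of the two translations it is built from. */
    unsigned int page_order_20 =
        page_order_21 < page_order_10 ? page_order_21 : page_order_10;

    printf("combined L2->L0 order = %u\n", page_order_20);  /* prints 9 (2M) */
    return 0;
}

With a 1G mapping at one level and a 2M mapping at the other, the nested p2m entry is therefore installed as a 2M mapping.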
