
[PATCH 3/4] x86/altp2m: p2m_altp2m_get_or_propagate() should honor present page order


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 1 Dec 2021 11:54:39 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Tamas K Lengyel <tamas@xxxxxxxxxxxxx>, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>, Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
  • Delivery-date: Wed, 01 Dec 2021 10:54:46 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Prior to XSA-304 the only caller merely happened to make no further
use of the order value it passes into the function. Even then this
was a latent issue: in the "get" case the function really should hand
back the order the underlying mapping actually uses (or rather the
smaller of the two orders), such that, going forward, no action would
be taken on unrelated mappings (in particular ones which have already
diverged from the host P2M).
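
As a purely illustrative sketch (the helper and its names are invented
here, not part of the patch), the "get" behaviour being asked for is
simply a clamp: never report more than the smaller of the order the
caller passed in (from the host P2M) and the order the altp2m mapping
itself uses.

    /* Illustrative only: clamp the order handed back to the caller. */
    static inline unsigned int clamped_order(unsigned int host_order,
                                             unsigned int alt_order)
    {
        return alt_order < host_order ? alt_order : host_order;
    }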

Similarly, in the "propagate" case only the smaller of the two orders
should be used when creating the new entry, again to avoid altering
mappings which have already diverged from the host P2M.
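
The "propagate" side is the same clamp applied before creating the new
entry: round GFN and MFN down to the clamped order, so the new altp2m
entry never spans GFNs whose altp2m state has already diverged. A
minimal, purely illustrative sketch (names invented, not the patch
itself):

    /* Illustrative only: install the entry using the already clamped
     * order, with GFN and MFN rounded down to that order. */
    static void propagate_clamped(unsigned long gfn, unsigned long mfn,
                                  unsigned int order /* clamped */)
    {
        unsigned long mask = ~((1UL << order) - 1);

        gfn &= mask;    /* start of the (super)page */
        mfn &= mask;

        /* ... create the altp2m entry for (gfn, mfn, order), e.g. via
         * a p2m_set_entry()-style call ... */
    }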

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1826,7 +1826,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
          * altp2m.
          */
         if ( p2m_altp2m_get_or_propagate(p2m, gfn, &mfn, &p2mt,
-                                         &p2ma, page_order) )
+                                         &p2ma, &page_order) )
         {
             /* Entry was copied from host -- retry fault */
             rc = 1;
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2198,10 +2198,11 @@ bool_t p2m_switch_vcpu_altp2m_by_id(stru
  */
 bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
                                  mfn_t *mfn, p2m_type_t *p2mt,
-                                 p2m_access_t *p2ma, unsigned int page_order)
+                                 p2m_access_t *p2ma, unsigned int *page_order)
 {
     p2m_type_t ap2mt;
     p2m_access_t ap2ma;
+    unsigned int cur_order;
     unsigned long mask;
     gfn_t gfn;
     mfn_t amfn;
@@ -2214,7 +2215,10 @@ bool p2m_altp2m_get_or_propagate(struct
      */
     p2m_lock(ap2m);
 
-    amfn = get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma, 0, NULL);
+    amfn = get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma, 0, &cur_order);
+
+    if ( cur_order > *page_order )
+        cur_order = *page_order;
 
     if ( !mfn_eq(amfn, INVALID_MFN) )
     {
@@ -2222,6 +2226,7 @@ bool p2m_altp2m_get_or_propagate(struct
         *mfn  = amfn;
         *p2mt = ap2mt;
         *p2ma = ap2ma;
+        *page_order = cur_order;
         return false;
     }
 
@@ -2229,6 +2234,7 @@ bool p2m_altp2m_get_or_propagate(struct
     if ( mfn_eq(*mfn, INVALID_MFN) )
     {
         p2m_unlock(ap2m);
+        *page_order = cur_order;
         return false;
     }
 
@@ -2237,11 +2243,11 @@ bool p2m_altp2m_get_or_propagate(struct
      * to the start of the superpage.  NB that we repurpose `amfn`
      * here.
      */
-    mask = ~((1UL << page_order) - 1);
+    mask = ~((1UL << cur_order) - 1);
     amfn = _mfn(mfn_x(*mfn) & mask);
     gfn = _gfn(gfn_l & mask);
 
-    rc = p2m_set_entry(ap2m, gfn, amfn, page_order, *p2mt, *p2ma);
+    rc = p2m_set_entry(ap2m, gfn, amfn, cur_order, *p2mt, *p2ma);
     p2m_unlock(ap2m);
 
     if ( rc )
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -852,7 +852,7 @@ void p2m_flush_altp2m(struct domain *d);
 /* Alternate p2m paging */
 bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
                                  mfn_t *mfn, p2m_type_t *p2mt,
-                                 p2m_access_t *p2ma, unsigned int page_order);
+                                 p2m_access_t *p2ma, unsigned int *page_order);
 
 /* Make a specific alternate p2m valid */
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);




 

