
[PATCH v2 2/3] x86/altp2m: p2m_altp2m_get_or_propagate() should honor present page order


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Tue, 4 Jan 2022 10:48:52 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Tamas K Lengyel <tamas@xxxxxxxxxxxxx>, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>, Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
  • Delivery-date: Tue, 04 Jan 2022 09:49:00 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Prior to XSA-304 the only caller merely happened to make no further use
of the order value that it passes into the function. Already then this
was a latent issue: In the "get" case the function really should hand
back the order the underlying mapping actually uses (or rather the
smaller of the two orders), such that (going forward) no action would be
taken on unrelated mappings (in particular ones which have already
diverged from the host P2M).

Similarly, in the "propagate" case only the smaller of the two orders
should actually be used for creating the new entry, again to avoid
altering mappings which have already diverged from the host P2M.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
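
(Not part of the patch, for illustration only: a minimal C sketch of
the clamping now performed inside p2m_altp2m_get_or_propagate(). The
identifiers host_order and alt_order are made up for this example; in
the patch the same effect is obtained by lowering cur_order to
*page_order where necessary.)

    /*
     * Illustration only: both the "get" and the "propagate" paths must
     * act on the smaller of the two orders -- the one the caller found
     * in the host P2M and the one the altp2m lookup reports -- so that
     * mappings which have already diverged from the host P2M are left
     * alone.
     */
    static unsigned int clamp_order(unsigned int host_order,
                                    unsigned int alt_order)
    {
        return alt_order < host_order ? alt_order : host_order;
    }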

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1832,7 +1832,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
          * altp2m.
          */
         if ( p2m_altp2m_get_or_propagate(p2m, gfn, &mfn, &p2mt,
-                                         &p2ma, page_order) )
+                                         &p2ma, &page_order) )
         {
             /* Entry was copied from host -- retry fault */
             rc = 1;
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2198,10 +2198,11 @@ bool_t p2m_switch_vcpu_altp2m_by_id(stru
  */
 bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
                                  mfn_t *mfn, p2m_type_t *p2mt,
-                                 p2m_access_t *p2ma, unsigned int page_order)
+                                 p2m_access_t *p2ma, unsigned int *page_order)
 {
     p2m_type_t ap2mt;
     p2m_access_t ap2ma;
+    unsigned int cur_order;
     unsigned long mask;
     gfn_t gfn;
     mfn_t amfn;
@@ -2214,7 +2215,10 @@ bool p2m_altp2m_get_or_propagate(struct
      */
     p2m_lock(ap2m);
 
-    amfn = get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma, 0, NULL);
+    amfn = get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma, 0, &cur_order);
+
+    if ( cur_order > *page_order )
+        cur_order = *page_order;
 
     if ( !mfn_eq(amfn, INVALID_MFN) )
     {
@@ -2222,6 +2226,7 @@ bool p2m_altp2m_get_or_propagate(struct
         *mfn  = amfn;
         *p2mt = ap2mt;
         *p2ma = ap2ma;
+        *page_order = cur_order;
         return false;
     }
 
@@ -2229,6 +2234,7 @@ bool p2m_altp2m_get_or_propagate(struct
     if ( mfn_eq(*mfn, INVALID_MFN) )
     {
         p2m_unlock(ap2m);
+        *page_order = cur_order;
         return false;
     }
 
@@ -2237,11 +2243,11 @@ bool p2m_altp2m_get_or_propagate(struct
      * to the start of the superpage.  NB that we repurpose `amfn`
      * here.
      */
-    mask = ~((1UL << page_order) - 1);
+    mask = ~((1UL << cur_order) - 1);
     amfn = _mfn(mfn_x(*mfn) & mask);
     gfn = _gfn(gfn_l & mask);
 
-    rc = p2m_set_entry(ap2m, gfn, amfn, page_order, *p2mt, *p2ma);
+    rc = p2m_set_entry(ap2m, gfn, amfn, cur_order, *p2mt, *p2ma);
     p2m_unlock(ap2m);
 
     if ( rc )
--- a/xen/arch/x86/include/asm/p2m.h
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -852,7 +852,7 @@ void p2m_flush_altp2m(struct domain *d);
 /* Alternate p2m paging */
 bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
                                  mfn_t *mfn, p2m_type_t *p2mt,
-                                 p2m_access_t *p2ma, unsigned int page_order);
+                                 p2m_access_t *p2ma, unsigned int *page_order);
 
 /* Make a specific alternate p2m valid */
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);
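
(Again illustration only, not part of the patch: a worked instance of
the rounding done just ahead of p2m_set_entry(). With cur_order = 9,
i.e. a 2M superpage made up of 4k pages, the mask clears the low nine
bits of both frame numbers:)

    /* Illustration: align a GFN down to the start of its superpage. */
    unsigned long order = 9;                     /* 2M = 1UL << 9 4k pages */
    unsigned long mask  = ~((1UL << order) - 1); /* == ~0x1ffUL */
    unsigned long gfn_l = 0x12345UL;
    unsigned long gfn   = gfn_l & mask;          /* -> 0x12200 */

The same mask is applied to the MFN, so GFN and MFN stay aligned to the
same superpage boundary when the new altp2m entry is created.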
