[Xen-devel] [PATCH v2 2/3] x86/mm: make guest_physmap_add_entry() HVM-only

Lift the !paging_mode_translate() part of guest_physmap_add_entry() into
guest_physmap_add_page() (which is what common code calls), eliminating
the dummy use of a (really HVM-only) P2M type in the PV case.

Suggested-by: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: New.
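
For context only (not part of the patch): below is a minimal stand-alone
sketch of the call split this change establishes. All types and helpers
in it are simplified stand-ins, not the real Xen definitions; it merely
illustrates that guest_physmap_add_page() now handles the PV
(!paging_mode_translate()) case itself and forwards only translated
domains to the now HVM-only guest_physmap_add_entry().

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long gfn_t;
typedef unsigned long mfn_t;
typedef enum { p2m_ram_rw } p2m_type_t;

/* Stand-in for the real struct domain / paging mode query. */
struct domain { bool translated; };

static bool paging_mode_translate(const struct domain *d)
{
    return d->translated;
}

/* Typed interface: after the patch this is HVM-only (CONFIG_HVM) and
 * refuses non-translated (PV) domains. */
static int guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                                   unsigned int page_order, p2m_type_t t)
{
    if ( !paging_mode_translate(d) )
        return -1;                 /* ASSERT_UNREACHABLE() in the patch */
    printf("HVM: map gfn %#lx -> mfn %#lx, order %u, type %d\n",
           gfn, mfn, page_order, (int)t);
    return 0;
}

/* Untyped interface, called from common code: handles the PV case
 * itself and forwards translated (HVM) domains with p2m_ram_rw. */
static int guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
                                  unsigned int page_order)
{
    if ( !paging_mode_translate(d) )
    {
        /* PV: only IOMMU bookkeeping, no P2M type involved at all. */
        printf("PV: gfn %#lx, order %u, IOMMU-only handling\n",
               gfn, page_order);
        return 0;
    }

    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}

int main(void)
{
    struct domain pv = { .translated = false };
    struct domain hvm = { .translated = true };

    guest_physmap_add_page(&pv, 0x1000, 0x2000, 0);   /* PV path */
    guest_physmap_add_page(&hvm, 0x1000, 0x2000, 0);  /* HVM path */
    return 0;
}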

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -813,22 +813,14 @@ guest_physmap_remove_page(struct domain
 }
 
 int
-guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
-                        unsigned int page_order, p2m_type_t t)
+guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+                       unsigned int page_order)
 {
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    unsigned long i;
-    gfn_t ogfn;
-    p2m_type_t ot;
-    p2m_access_t a;
-    mfn_t omfn;
-    int pod_count = 0;
-    int rc = 0;
-
     /* IOMMU for PV guests is handled in get_page_type() and put_page(). */
     if ( !paging_mode_translate(d) )
     {
         struct page_info *page = mfn_to_page(mfn);
+        unsigned long i;
 
         /*
          * Our interface for PV guests wrt IOMMU entries hasn't been very
@@ -841,7 +833,7 @@ guest_physmap_add_entry(struct domain *d
          * any guest-requested type changes succeed and remove the IOMMU
          * entry).
          */
-        if ( !need_iommu_pt_sync(d) || t != p2m_ram_rw )
+        if ( !need_iommu_pt_sync(d) )
             return 0;
 
         for ( i = 0; i < (1UL << page_order); ++i, ++page )
@@ -855,6 +847,29 @@ guest_physmap_add_entry(struct domain *d
         return 0;
     }
 
+    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
+}
+
+#ifdef CONFIG_HVM
+int
+guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
+                        unsigned int page_order, p2m_type_t t)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    unsigned long i;
+    gfn_t ogfn;
+    p2m_type_t ot;
+    p2m_access_t a;
+    mfn_t omfn;
+    int pod_count = 0;
+    int rc = 0;
+
+    if ( !paging_mode_translate(d) )
+    {
+        ASSERT_UNREACHABLE();
+        return -EPERM;
+    }
+
     /* foreign pages are added thru p2m_add_foreign */
     if ( p2m_is_foreign(t) )
         return -EINVAL;
@@ -978,7 +993,6 @@ guest_physmap_add_entry(struct domain *d
                  gfn_x(gfn), mfn_x(mfn));
         rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order,
                            p2m_invalid, p2m->default_access);
-#ifdef CONFIG_HVM
         if ( rc == 0 )
         {
             pod_lock(p2m);
@@ -986,7 +1000,6 @@ guest_physmap_add_entry(struct domain *d
             BUG_ON(p2m->pod.entry_count < 0);
             pod_unlock(p2m);
         }
-#endif
     }
 
 out:
@@ -994,7 +1007,7 @@ out:
 
     return rc;
 }
-
+#endif
 
 /*
  * Modify the p2m type of a single gfn from ot to nt.
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -587,14 +587,9 @@ int guest_physmap_add_entry(struct domai
                             mfn_t mfn, unsigned int page_order,
                             p2m_type_t t);
 
-/* Untyped version for RAM only, for compatibility */
-static inline int guest_physmap_add_page(struct domain *d,
-                                         gfn_t gfn,
-                                         mfn_t mfn,
-                                         unsigned int page_order)
-{
-    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
-}
+/* Untyped version for RAM only, for compatibility and PV. */
+int guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+                           unsigned int page_order);
 
 /* Set a p2m range as populate-on-demand */
 int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
