[PATCH v2 05/12] x86/p2m: change_entry_type_* hooks are HVM-only

Exclude functions using them from !HVM builds, thus making it possible
to exclude the hooks as well. Also cover the already unused
memory_type_changed hook while inserting the #ifdef in the struct.

While no such check was present before, I can't see how
XEN_DOMCTL_set_broken_page_p2m could have been meant to work for PV
guests the way it is implemented. Restrict this operation to HVM
guests.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: New.
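
For illustration only (not part of the patch): a minimal sketch of the
gating pattern, with hypothetical struct and hook names (CONFIG_HVM is
the real Xen build option; everything else below is made up). Once the
hook member is compiled out of the struct for !HVM builds, every
function referencing it has to be compiled out as well, or the build
breaks:

struct ops {
#ifdef CONFIG_HVM
    /* Compiled out of !HVM builds altogether. */
    void (*change_type)(struct ops *o, int ot, int nt);
#endif
};

#ifdef CONFIG_HVM
/*
 * Callers referencing the member must be gated identically; a !HVM
 * build would otherwise fail with "no member named 'change_type'".
 */
static void change_all(struct ops *o, int ot, int nt)
{
    if ( o->change_type )
        o->change_type(o, ot, nt);
}
#endif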

--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1080,9 +1080,12 @@ long arch_do_domctl(
     {
         p2m_type_t pt;
         unsigned long pfn = domctl->u.set_broken_page_p2m.pfn;
-        mfn_t mfn = get_gfn_query(d, pfn, &pt);
 
-        if ( unlikely(!mfn_valid(mfn)) || unlikely(!p2m_is_ram(pt)) )
+        if ( !is_hvm_domain(d) )
+            return -EINVAL;
+
+        if ( unlikely(!mfn_valid(get_gfn_query(d, pfn, &pt))) ||
+             unlikely(!p2m_is_ram(pt)) )
             ret = -EINVAL;
         else
             ret = p2m_change_type_one(d, pfn, pt, p2m_ram_broken);
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -292,6 +292,8 @@ int p2m_is_logdirty_range(struct p2m_dom
     return 0;
 }
 
+#ifdef CONFIG_HVM
+
 static void change_entry_type_global(struct p2m_domain *p2m,
                                      p2m_type_t ot, p2m_type_t nt)
 {
@@ -316,7 +318,6 @@ void p2m_change_entry_type_global(struct
 
     change_entry_type_global(hostp2m, ot, nt);
 
-#ifdef CONFIG_HVM
     if ( unlikely(altp2m_active(d)) )
     {
         unsigned int i;
@@ -331,12 +332,10 @@ void p2m_change_entry_type_global(struct
                 p2m_unlock(altp2m);
             }
     }
-#endif
 
     p2m_unlock(hostp2m);
 }
 
-#ifdef CONFIG_HVM
 /* There's already a memory_type_changed() in asm/mtrr.h. */
 static void _memory_type_changed(struct p2m_domain *p2m)
 {
@@ -369,7 +368,8 @@ void p2m_memory_type_changed(struct doma
 
     p2m_unlock(hostp2m);
 }
-#endif
+
+#endif /* CONFIG_HVM */
 
 int p2m_set_ioreq_server(struct domain *d,
                          unsigned int flags,
@@ -876,6 +876,7 @@ guest_physmap_add_page(struct domain *d,
 }
 
 #ifdef CONFIG_HVM
+
 int
 guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                         unsigned int page_order, p2m_type_t t)
@@ -1024,7 +1025,6 @@ out:
 
     return rc;
 }
-#endif
 
 /*
  * Modify the p2m type of a single gfn from ot to nt.
@@ -1161,7 +1161,6 @@ void p2m_change_type_range(struct domain
 
     change_type_range(hostp2m, start, end, ot, nt);
 
-#ifdef CONFIG_HVM
     if ( unlikely(altp2m_active(d)) )
     {
         unsigned int i;
@@ -1176,7 +1175,6 @@ void p2m_change_type_range(struct domain
                 p2m_unlock(altp2m);
             }
     }
-#endif
     hostp2m->defer_nested_flush = 0;
     if ( nestedhvm_enabled(d) )
         p2m_flush_nestedp2m(d);
@@ -1184,6 +1182,8 @@ void p2m_change_type_range(struct domain
     p2m_unlock(hostp2m);
 }
 
+#endif /* CONFIG_HVM */
+
 /*
  * Finish p2m type change for gfns which are marked as need_recalc in a range.
  * Uses the current p2m's max_mapped_pfn to further clip the invalidation
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -324,6 +324,7 @@ p2m_next_level(struct p2m_domain *p2m, v
     return rc;
 }
 
+#ifdef CONFIG_HVM
 /*
  * Mark (via clearing the U flag) as needing P2M type re-calculation all valid
  * present entries at the targeted level for the passed in GFN range, which is
@@ -392,6 +393,7 @@ static int p2m_pt_set_recalc_range(struc
 
     return err;
 }
+#endif /* CONFIG_HVM */
 
 /*
  * Handle possibly necessary P2M type re-calculation (U flag clear for a
@@ -930,6 +932,8 @@ pod_retry_l1:
     return (p2m_is_valid(*t) || p2m_is_any_ram(*t)) ? mfn : INVALID_MFN;
 }
 
+#ifdef CONFIG_HVM
+
 static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
                                             p2m_type_t ot, p2m_type_t nt)
 {
@@ -1011,6 +1015,8 @@ static int p2m_pt_change_entry_type_rang
     return err;
 }
 
+#endif /* CONFIG_HVM */
+
 #if P2M_AUDIT
 static long p2m_pt_audit_p2m(struct p2m_domain *p2m)
 {
@@ -1168,8 +1174,10 @@ void p2m_pt_init(struct p2m_domain *p2m)
     p2m->set_entry = p2m_pt_set_entry;
     p2m->get_entry = p2m_pt_get_entry;
     p2m->recalc = do_recalc;
+#ifdef CONFIG_HVM
     p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
     p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
+#endif
 
     /* Still too early to use paging_mode_hap(). */
     if ( hap_enabled(p2m->domain) )
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -588,6 +588,7 @@ static int paging_log_dirty_op(struct do
     return rv;
 }
 
+#ifdef CONFIG_HVM
 void paging_log_dirty_range(struct domain *d,
                            unsigned long begin_pfn,
                            unsigned long nr,
@@ -617,6 +618,7 @@ void paging_log_dirty_range(struct domai
 
     guest_flush_tlb_mask(d, d->dirty_cpumask);
 }
+#endif
 
 /*
  * Callers must supply log_dirty_ops for the log dirty code to call. This
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -264,6 +264,7 @@ struct p2m_domain {
     void               (*enable_hardware_log_dirty)(struct p2m_domain *p2m);
     void               (*disable_hardware_log_dirty)(struct p2m_domain *p2m);
     void               (*flush_hardware_cached_dirty)(struct p2m_domain *p2m);
+#ifdef CONFIG_HVM
     void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
@@ -272,6 +273,7 @@ struct p2m_domain {
                                                   unsigned long first_gfn,
                                                   unsigned long last_gfn);
     void               (*memory_type_changed)(struct p2m_domain *p2m);
+#endif
     void               (*write_p2m_entry_pre)(struct domain *d,
                                               unsigned long gfn,
                                               l1_pgentry_t old,
