
[Xen-devel] [PATCH 4/6] x86/mm: prefer is_..._domain() over is_..._vcpu()



... when the domain pointer is already available.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
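
For context, the per-vCPU predicates are (going by the current sched.h definitions; a
sketch, the exact forms may differ) just thin wrappers around the per-domain ones:

    /* xen/include/xen/sched.h (sketch, not verbatim) */
    #define is_hvm_vcpu(v)        (is_hvm_domain((v)->domain))
    #define is_pv_32on64_vcpu(v)  (is_pv_32on64_domain((v)->domain))

so where d is already in scope, using the ..._domain() form simply avoids the redundant
v->domain dereference without any change in behaviour.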

--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -159,7 +159,7 @@ guest_walk_tables(struct vcpu *v, struct
     mflags = mandatory_flags(v, pfec);
     iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
 
-    if ( is_hvm_vcpu(v) && !(pfec & PFEC_user_mode) )
+    if ( is_hvm_domain(d) && !(pfec & PFEC_user_mode) )
     {
         struct segment_register seg;
         const struct cpu_user_regs *regs = guest_cpu_user_regs();
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3157,12 +3157,10 @@ void shadow_teardown(struct domain *d)
      * destroyed any shadows of it or sh_destroy_shadow will get confused. */
     if ( !pagetable_is_null(d->arch.paging.shadow.unpaged_pagetable) )
     {
+        ASSERT(is_hvm_domain(d));
         for_each_vcpu(d, v)
-        {
-            ASSERT(is_hvm_vcpu(v));
             if ( !hvm_paging_enabled(v) )
                 v->arch.guest_table = pagetable_null();
-        }
         unpaged_pagetable =
             pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable);
         d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2043,7 +2043,7 @@ void sh_destroy_monitor_table(struct vcp
         sh_unmap_domain_page(l3e);
         shadow_free(d, m3mfn);
 
-        if ( is_pv_32on64_vcpu(v) )
+        if ( is_pv_32on64_domain(d) )
         {
             /* Need to destroy the l3 and l2 monitor pages that map the
              * Xen VAs at 3GB-4GB */
@@ -3963,7 +3963,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
                    (unsigned long)pagetable_get_pfn(v->arch.guest_table));
 
 #if GUEST_PAGING_LEVELS == 4
-    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_vcpu(v) )
+    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_domain(d) )
         gmfn = pagetable_get_mfn(v->arch.guest_table_user);
     else
 #endif
@@ -4674,7 +4674,7 @@ static void *emulate_map_dest(struct vcp
 
         /* Cross-page emulated writes are only supported for HVM guests;
          * PV guests ought to know better */
-        if ( !is_hvm_vcpu(v) )
+        if ( !is_hvm_domain(d) )
             return MAPPING_UNHANDLEABLE;
 
         /* This write crosses a page boundary.  Translate the second page */
@@ -5104,7 +5104,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
             gmfn = get_shadow_status(d, get_gfn_query_unlocked(
                                         d, gfn_x(gfn), &p2mt),
                                      ((GUEST_PAGING_LEVELS == 3 ||
-                                       is_pv_32on64_vcpu(v))
+                                       is_pv_32on64_domain(d))
                                       && !shadow_mode_external(d)
                                       && (guest_index(gl3e) % 4) == 3)
                                      ? SH_type_l2h_shadow



Attachment: use-is_X_domain-x86-mm.patch
Description: Text document


 

