[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 2/9] x86/pagewalk: Use pointer syntax for pfec parameter



The pfec parameter is a pointer, not an array, so dereference it with pointer
syntax (*pfec) rather than array-index syntax (pfec[0]).

No functional change.

Requested-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

v2:
 * New
---
 xen/arch/x86/mm/hap/guest_walk.c | 24 ++++++++++++------------
 xen/arch/x86/mm/shadow/multi.c   | 12 ++++++------
 xen/include/asm-x86/paging.h     |  7 ++++---
 3 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
index 313f82f..e202c9a 100644
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -65,7 +65,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
     if ( p2m_is_paging(p2mt) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        pfec[0] = PFEC_page_paged;
+        *pfec = PFEC_page_paged;
         if ( top_page )
             put_page(top_page);
         p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
@@ -73,14 +73,14 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
     }
     if ( p2m_is_shared(p2mt) )
     {
-        pfec[0] = PFEC_page_shared;
+        *pfec = PFEC_page_shared;
         if ( top_page )
             put_page(top_page);
         return gfn_x(INVALID_GFN);
     }
     if ( !top_page )
     {
-        pfec[0] &= ~PFEC_page_present;
+        *pfec &= ~PFEC_page_present;
         goto out_tweak_pfec;
     }
     top_mfn = _mfn(page_to_mfn(top_page));
@@ -91,7 +91,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
 #if GUEST_PAGING_LEVELS == 3
     top_map += (cr3 & ~(PAGE_MASK | 31));
 #endif
-    missing = guest_walk_tables(v, p2m, ga, &gw, pfec[0], top_mfn, top_map);
+    missing = guest_walk_tables(v, p2m, ga, &gw, *pfec, top_mfn, top_map);
     unmap_domain_page(top_map);
     put_page(top_page);
 
@@ -107,13 +107,13 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
         if ( p2m_is_paging(p2mt) )
         {
             ASSERT(p2m_is_hostp2m(p2m));
-            pfec[0] = PFEC_page_paged;
+            *pfec = PFEC_page_paged;
             p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
             return gfn_x(INVALID_GFN);
         }
         if ( p2m_is_shared(p2mt) )
         {
-            pfec[0] = PFEC_page_shared;
+            *pfec = PFEC_page_shared;
             return gfn_x(INVALID_GFN);
         }
 
@@ -124,19 +124,19 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
     }
 
     if ( missing & _PAGE_PRESENT )
-        pfec[0] &= ~PFEC_page_present;
+        *pfec &= ~PFEC_page_present;
 
     if ( missing & _PAGE_INVALID_BITS ) 
-        pfec[0] |= PFEC_reserved_bit;
+        *pfec |= PFEC_reserved_bit;
 
     if ( missing & _PAGE_PKEY_BITS )
-        pfec[0] |= PFEC_prot_key;
+        *pfec |= PFEC_prot_key;
 
     if ( missing & _PAGE_PAGED )
-        pfec[0] = PFEC_page_paged;
+        *pfec = PFEC_page_paged;
 
     if ( missing & _PAGE_SHARED )
-        pfec[0] = PFEC_page_shared;
+        *pfec = PFEC_page_shared;
 
  out_tweak_pfec:
     /*
@@ -144,7 +144,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
      * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
      */
     if ( !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
-        pfec[0] &= ~PFEC_insn_fetch;
+        *pfec &= ~PFEC_insn_fetch;
 
     return gfn_x(INVALID_GFN);
 }
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7ea9d81..d9bf212 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3727,30 +3727,30 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
     /* Check the vTLB cache first */
-    unsigned long vtlb_gfn = vtlb_lookup(v, va, pfec[0]);
+    unsigned long vtlb_gfn = vtlb_lookup(v, va, *pfec);
     if ( VALID_GFN(vtlb_gfn) )
         return vtlb_gfn;
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
-    if ( (missing = sh_walk_guest_tables(v, va, &gw, pfec[0])) != 0 )
+    if ( (missing = sh_walk_guest_tables(v, va, &gw, *pfec)) != 0 )
     {
         if ( (missing & _PAGE_PRESENT) )
-            pfec[0] &= ~PFEC_page_present;
+            *pfec &= ~PFEC_page_present;
         if ( missing & _PAGE_INVALID_BITS )
-            pfec[0] |= PFEC_reserved_bit;
+            *pfec |= PFEC_reserved_bit;
         /*
          * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
          * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
          */
         if ( is_hvm_vcpu(v) && !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
-            pfec[0] &= ~PFEC_insn_fetch;
+            *pfec &= ~PFEC_insn_fetch;
         return gfn_x(INVALID_GFN);
     }
     gfn = guest_walk_to_gfn(&gw);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
     /* Remember this successful VA->GFN translation for later. */
-    vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), pfec[0]);
+    vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), *pfec);
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
     return gfn_x(gfn);
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 2f5befc..f262c9e 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -242,12 +242,13 @@ paging_fault(unsigned long va, struct cpu_user_regs *regs)
 /* Handle invlpg requests on vcpus. */
 void paging_invlpg(struct vcpu *v, unsigned long va);
 
-/* Translate a guest virtual address to the frame number that the
+/*
+ * Translate a guest virtual address to the frame number that the
  * *guest* pagetables would map it to.  Returns INVALID_GFN if the guest
  * tables don't map this address for this kind of access.
- * pfec[0] is used to determine which kind of access this is when
+ * *pfec is used to determine which kind of access this is when
  * walking the tables.  The caller should set the PFEC_page_present bit
- * in pfec[0]; in the failure case, that bit will be cleared if appropriate.
+ * in *pfec; in the failure case, that bit will be cleared if appropriate.
  *
  * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
  * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.