
[PATCH 1/2] x86/shadow: adjust some shadow_set_l<N>e() callers


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 13 Oct 2021 17:37:49 +0200
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Tim Deegan <tim@xxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>
  • Delivery-date: Wed, 13 Oct 2021 15:37:58 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Coverity dislikes sh_page_fault() storing the return value of
shadow_set_l1e() in a local variable which is then never used (and,
oddly enough, it spots this in the 2- and 3-level cases, but not in the
4-level one). Instead of adding yet another cast to void as a
replacement, take the opportunity to also drop a number of existing
such casts - not using a function's return value is a common enough
thing to do. (It is of course an independent question whether ignoring
errors like this is a good idea.)
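
For illustration only (a minimal sketch, not part of the patch; the
identifiers are the ones visible in the sh_page_fault() hunk below):

    int r;

    /* What Coverity flags: the return value is stored but never read. */
    r = shadow_set_l1e(d, ptr_sl1e, sl1e, p2mt, sl1mfn);

    /* The cast-to-void alternative, which merely adds clutter: */
    (void) shadow_set_l1e(d, ptr_sl1e, sl1e, p2mt, sl1mfn);

    /* What this patch does instead: plainly ignore the return value. */
    shadow_set_l1e(d, ptr_sl1e, sl1e, p2mt, sl1mfn);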

Coverity-ID: 1492856
Coverity-ID: 1492858
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1416,7 +1416,7 @@ void sh_unhook_32b_mappings(struct domai
     shadow_l2e_t *sl2e;
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
         if ( !user_only || (sl2e->l2 & _PAGE_USER) )
-            (void) shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn);
+            shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn);
     });
 }
 
@@ -1428,7 +1428,7 @@ void sh_unhook_pae_mappings(struct domai
     shadow_l2e_t *sl2e;
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
         if ( !user_only || (sl2e->l2 & _PAGE_USER) )
-            (void) shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn);
+            shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn);
     });
 }
 
@@ -1439,7 +1439,7 @@ void sh_unhook_64b_mappings(struct domai
     shadow_l4e_t *sl4e;
     SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, {
         if ( !user_only || (sl4e->l4 & _PAGE_USER) )
-            (void) shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn);
+            shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn);
     });
 }
 
@@ -1969,7 +1969,7 @@ static void sh_prefetch(struct vcpu *v,
 
         /* Propagate the entry.  */
         l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
-        (void) shadow_set_l1e(d, ptr_sl1e + i, sl1e, p2mt, sl1mfn);
+        shadow_set_l1e(d, ptr_sl1e + i, sl1e, p2mt, sl1mfn);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         if ( snpl1p != NULL )
@@ -2534,7 +2534,7 @@ static int sh_page_fault(struct vcpu *v,
 
     /* Calculate the shadow entry and write it */
     l1e_propagate_from_guest(v, gw.l1e, gmfn, &sl1e, ft, p2mt);
-    r = shadow_set_l1e(d, ptr_sl1e, sl1e, p2mt, sl1mfn);
+    shadow_set_l1e(d, ptr_sl1e, sl1e, p2mt, sl1mfn);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     if ( mfn_valid(gw.l1mfn)
@@ -3014,8 +3014,7 @@ static bool sh_invlpg(struct vcpu *v, un
                 shadow_l1e_t *sl1;
                 sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(linear);
                 /* Remove the shadow entry that maps this VA */
-                (void) shadow_set_l1e(d, sl1, shadow_l1e_empty(),
-                                      p2m_invalid, sl1mfn);
+                shadow_set_l1e(d, sl1, shadow_l1e_empty(), p2m_invalid, sl1mfn);
             }
             paging_unlock(d);
             /* Need the invlpg, to pick up the disappeareance of the sl1e */
@@ -3608,7 +3607,8 @@ int sh_rm_write_access_from_l1(struct do
              && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) )
         {
             shadow_l1e_t ro_sl1e = shadow_l1e_remove_flags(*sl1e, _PAGE_RW);
-            (void) shadow_set_l1e(d, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn);
+
+            shadow_set_l1e(d, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn);
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
             /* Remember the last shadow that we shot a writeable mapping in */
             if ( curr->domain == d )
@@ -3637,8 +3637,7 @@ int sh_rm_mappings_from_l1(struct domain
         if ( (flags & _PAGE_PRESENT)
              && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) )
         {
-            (void) shadow_set_l1e(d, sl1e, shadow_l1e_empty(),
-                                  p2m_invalid, sl1mfn);
+            shadow_set_l1e(d, sl1e, shadow_l1e_empty(), p2m_invalid, sl1mfn);
             if ( sh_check_page_has_no_refs(mfn_to_page(target_mfn)) )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
@@ -3656,20 +3655,20 @@ void sh_clear_shadow_entry(struct domain
     switch ( mfn_to_page(smfn)->u.sh.type )
     {
     case SH_type_l1_shadow:
-        (void) shadow_set_l1e(d, ep, shadow_l1e_empty(), p2m_invalid, smfn);
+        shadow_set_l1e(d, ep, shadow_l1e_empty(), p2m_invalid, smfn);
         break;
     case SH_type_l2_shadow:
 #if GUEST_PAGING_LEVELS >= 4
     case SH_type_l2h_shadow:
 #endif
-        (void) shadow_set_l2e(d, ep, shadow_l2e_empty(), smfn);
+        shadow_set_l2e(d, ep, shadow_l2e_empty(), smfn);
         break;
 #if GUEST_PAGING_LEVELS >= 4
     case SH_type_l3_shadow:
-        (void) shadow_set_l3e(d, ep, shadow_l3e_empty(), smfn);
+        shadow_set_l3e(d, ep, shadow_l3e_empty(), smfn);
         break;
     case SH_type_l4_shadow:
-        (void) shadow_set_l4e(d, ep, shadow_l4e_empty(), smfn);
+        shadow_set_l4e(d, ep, shadow_l4e_empty(), smfn);
         break;
 #endif
     default: BUG(); /* Called with the wrong kind of shadow. */
@@ -3689,7 +3688,7 @@ int sh_remove_l1_shadow(struct domain *d
         if ( (flags & _PAGE_PRESENT)
              && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
         {
-            (void) shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn);
+            shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn);
             if ( mfn_to_page(sl1mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
@@ -3712,7 +3711,7 @@ int sh_remove_l2_shadow(struct domain *d
         if ( (flags & _PAGE_PRESENT)
              && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
         {
-            (void) shadow_set_l3e(d, sl3e, shadow_l3e_empty(), sl3mfn);
+            shadow_set_l3e(d, sl3e, shadow_l3e_empty(), sl3mfn);
             if ( mfn_to_page(sl2mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
@@ -3734,7 +3733,7 @@ int sh_remove_l3_shadow(struct domain *d
         if ( (flags & _PAGE_PRESENT)
              && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
         {
-            (void) shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn);
+            shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn);
             if ( mfn_to_page(sl3mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
