
[RFC PATCH 07/11] x86: Migrate MSR handler vendor checks to x86_vendor_is()


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
  • Date: Wed, 26 Nov 2025 17:44:09 +0100
  • Cc: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Jason Andryuk <jason.andryuk@xxxxxxx>, Xenia Ragiadakou <xenia.ragiadakou@xxxxxxx>, "Stefano Stabellini" <sstabellini@xxxxxxxxxx>
  • Delivery-date: Wed, 26 Nov 2025 16:45:28 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Many handlers are vendor-specific and are currently gated on runtime
checks. Migrating those checks to x86_vendor_is() lets the compiler
elide the handling code for CPU vendors that aren't compiled in.

No functional change.

Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
 xen/arch/x86/msr.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)
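
Note (not part of the commit message): the elision works because
x86_vendor_is() can reduce to a compile-time constant when every vendor
in the mask is configured out. A minimal sketch of the idea, using
hypothetical CONFIG_* names purely for illustration:

  /*
   * Illustrative only -- not the actual helper from this series.
   * Vendors that are compiled out are masked off at build time, so a
   * check gated purely on disabled vendors folds to constant false and
   * the compiler drops the guarded handler body as dead code.
   */
  #define X86_ENABLED_VENDORS                                    \
      ((IS_ENABLED(CONFIG_INTEL)   ? X86_VENDOR_INTEL   : 0) |   \
       (IS_ENABLED(CONFIG_AMD)     ? X86_VENDOR_AMD     : 0) |   \
       (IS_ENABLED(CONFIG_HYGON)   ? X86_VENDOR_HYGON   : 0) |   \
       (IS_ENABLED(CONFIG_CENTAUR) ? X86_VENDOR_CENTAUR : 0))

  static inline bool x86_vendor_is(unsigned int vendor, unsigned int mask)
  {
      return vendor & mask & X86_ENABLED_VENDORS;
  }

With e.g. CONFIG_CENTAUR=n, a check such as
!x86_vendor_is(cp->x86_vendor, X86_VENDOR_CENTAUR) becomes constant
true, turning that case into an unconditional "goto gp_fault" and
letting dead-code elimination remove the handler body.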

diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index ad75a2e108..f022332971 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -157,8 +157,9 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
          * The MSR has existed on all Intel parts since before the 64bit days,
          * and is implemented by other vendors.
          */
-        if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR |
-                                 X86_VENDOR_SHANGHAI)) )
+        if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL   |
+                                            X86_VENDOR_CENTAUR |
+                                            X86_VENDOR_SHANGHAI) )
             goto gp_fault;
 
         *val = IA32_FEATURE_CONTROL_LOCK;
@@ -169,8 +170,8 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
         break;
 
     case MSR_IA32_PLATFORM_ID:
-        if ( !(cp->x86_vendor & X86_VENDOR_INTEL) ||
-             !(boot_cpu_data.x86_vendor & X86_VENDOR_INTEL) )
+        if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) ||
+             !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
             goto gp_fault;
         rdmsrl(MSR_IA32_PLATFORM_ID, *val);
         break;
@@ -189,9 +190,10 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
          * from Xen's last microcode load, which can be forwarded straight to
          * the guest.
          */
-        if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_AMD)) ||
-             !(boot_cpu_data.x86_vendor &
-               (X86_VENDOR_INTEL | X86_VENDOR_AMD)) ||
+        if ( !x86_vendor_is(cp->x86_vendor,
+                            X86_VENDOR_INTEL | X86_VENDOR_AMD) ||
+             !x86_vendor_is(boot_cpu_data.x86_vendor,
+                            X86_VENDOR_INTEL | X86_VENDOR_AMD) ||
              rdmsr_safe(MSR_AMD_PATCHLEVEL, val) )
             goto gp_fault;
         break;
@@ -236,7 +238,8 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
          */
     case MSR_IA32_PERF_STATUS:
     case MSR_IA32_PERF_CTL:
-        if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR)) )
+        if ( !x86_vendor_is(cp->x86_vendor,
+                            X86_VENDOR_INTEL | X86_VENDOR_CENTAUR) )
             goto gp_fault;
 
         *val = 0;
@@ -245,7 +248,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
         goto gp_fault;
 
     case MSR_IA32_THERM_STATUS:
-        if ( cp->x86_vendor != X86_VENDOR_INTEL )
+        if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) )
             goto gp_fault;
         *val = 0;
         break;
@@ -302,7 +305,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
     case MSR_K8_IORR_MASK1:
     case MSR_K8_TSEG_BASE:
     case MSR_K8_TSEG_MASK:
-        if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+        if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
             goto gp_fault;
         if ( !is_hardware_domain(d) )
             return X86EMUL_UNHANDLEABLE;
@@ -314,14 +317,15 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
         break;
 
     case MSR_K8_HWCR:
-        if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+        if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
             goto gp_fault;
         *val = 0;
         break;
 
     case MSR_FAM10H_MMIO_CONF_BASE:
         if ( !is_hardware_domain(d) ||
-             !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
+             !x86_vendor_is(cp->x86_vendor,
+                            X86_VENDOR_AMD | X86_VENDOR_HYGON) ||
              rdmsr_safe(msr, val) )
             goto gp_fault;
 
@@ -338,7 +342,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
         break;
 
     case MSR_AMD64_DE_CFG:
-        if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+        if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
             goto gp_fault;
         *val = AMD64_DE_CFG_LFENCE_SERIALISE;
         break;
@@ -461,7 +465,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
          * for backwards compatiblity, the OS should write 0 to it before
          * trying to access the current microcode version.
          */
-        if ( cp->x86_vendor != X86_VENDOR_INTEL || val != 0 )
+        if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) || val != 0 )
             goto gp_fault;
         break;
 
@@ -471,7 +475,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
          * to AMD CPUs as well (at least the architectural/CPUID part does).
          */
         if ( is_pv_domain(d) ||
-             cp->x86_vendor != X86_VENDOR_AMD )
+             !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD) )
             goto gp_fault;
         break;
 
@@ -483,7 +487,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
          * by any CPUID bit.
          */
         if ( is_pv_domain(d) ||
-             cp->x86_vendor != X86_VENDOR_INTEL )
+             !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) )
             goto gp_fault;
         break;
 
@@ -553,7 +557,8 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
          * a cpufreq controller dom0 which has full access.
          */
     case MSR_IA32_PERF_CTL:
-        if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR)) )
+        if ( !x86_vendor_is(cp->x86_vendor,
+                            X86_VENDOR_INTEL | X86_VENDOR_CENTAUR) )
             goto gp_fault;
 
         if ( likely(!is_cpufreq_controller(d)) || wrmsr_safe(msr, val) == 0 )
@@ -663,7 +668,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
          * OpenBSD 6.7 will panic if writing to DE_CFG triggers a #GP:
          * https://www.illumos.org/issues/12998 - drop writes.
          */
-        if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+        if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
             goto gp_fault;
         break;
 
-- 
2.43.0
