
[PATCH v3] x86/Intel: use CPUID bit to determine PPIN availability


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Thu, 27 Jan 2022 13:54:42 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Thu, 27 Jan 2022 12:55:05 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

As of SDM revision 076 there is a CPUID bit for this functionality. Use
it to amend the existing model-based logic.
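
For reference, the bit in question is CPUID.(EAX=7,ECX=1):EBX[0], i.e. bit 0
of featureset word 12 ("7b1") below.  A stand-alone probe, purely
illustrative and not part of this patch (GCC/clang <cpuid.h> assumed), could
look like:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 7 sub-leaf 0: EAX reports the highest valid sub-leaf. */
    if ( !__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) || !eax )
    {
        puts("CPUID leaf 7 sub-leaf 1 not available");
        return 1;
    }

    __get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx);
    printf("PPIN CPUID bit (7:1.EBX[0]): %u\n", ebx & 1);

    return 0;
}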

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
It continues to be unclear for which CPU models, if any, the PPIN_CAP
bit in PLATFORM_INFO could be used in preference to a model check.
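
For illustration only (not part of the patch), a check based on that bit
might look like the hypothetical helper below; it assumes PPIN_CAP is bit 23
of MSR_INTEL_PLATFORM_INFO (0xCE) as per the SDM, and whether relying on it
is safe for any given model is exactly the open question above.

static bool ppin_cap_from_platform_info(void)
{
    uint64_t platform_info;

    /* PPIN_CAP assumed to be PLATFORM_INFO[23]. */
    return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, platform_info) &&
           (platform_info & (1ULL << 23));
}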
---
v3: Actually record CPUID[7:1].EBX. Adjust policy <-> feature conversion
    functions.
v2: Don't rename AMD's identifier in xen-cpuid.c. Name Intel's just
    "ppin" as well. Move str_7b1[]. Update a comment.

--- a/tools/misc/xen-cpuid.c
+++ b/tools/misc/xen-cpuid.c
@@ -195,6 +195,11 @@ static const char *const str_e21a[32] =
     [ 6] = "nscb",
 };
 
+static const char *const str_7b1[32] =
+{
+    [ 0] = "ppin",
+};
+
 static const struct {
     const char *name;
     const char *abbr;
@@ -213,6 +218,7 @@ static const struct {
     { "0x00000007:0.edx", "7d0", str_7d0 },
     { "0x00000007:1.eax", "7a1", str_7a1 },
     { "0x80000021.eax",  "e21a", str_e21a },
+    { "0x00000007:1.ebx", "7b1", str_7b1 },
 };
 
 #define COL_ALIGN "18"
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -445,7 +445,8 @@ static void generic_identify(struct cpui
                if (eax > 0)
                        cpuid_count(0x00000007, 1,
                                    &c->x86_capability[cpufeat_word(X86_FEATURE_AVX512_BF16)],
-                                   &tmp, &tmp, &tmp);
+                                   &c->x86_capability[cpufeat_word(X86_FEATURE_INTEL_PPIN)],
+                                   &tmp, &tmp);
        }
 
        if (c->cpuid_level >= 0xd)
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -859,12 +859,20 @@ static void intel_init_ppin(const struct
     /*
      * Even if testing the presence of the MSR would be enough, we don't
      * want to risk the situation where other models reuse this MSR for
-     * other purposes.
+     * other purposes.  Despite the late addition of a CPUID bit (rendering
+     * the MSR architectural), keep using the same detection logic there.
      */
     switch ( c->x86_model )
     {
         uint64_t val;
 
+    default:
+        if ( !cpu_has(c, X86_FEATURE_INTEL_PPIN) )
+        {
+            ppin_msr = 0;
+            return;
+        }
+        fallthrough;
     case 0x3e: /* IvyBridge X */
     case 0x3f: /* Haswell X */
     case 0x4f: /* Broadwell X */
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -299,6 +299,9 @@ XEN_CPUFEATURE(FSRCS,        10*32+12) /
 XEN_CPUFEATURE(LFENCE_DISPATCH,    11*32+ 2) /*A  LFENCE always serializing */
 XEN_CPUFEATURE(NSCB,               11*32+ 6) /*A  Null Selector Clears Base (and limit too) */
 
+/* Intel-defined CPU features, CPUID level 0x00000007:1.ebx, word 12 */
+XEN_CPUFEATURE(INTEL_PPIN,         12*32+ 0) /*   Protected Processor Inventory Number */
+
 #endif /* XEN_CPUFEATURE */
 
 /* Clean up from a default include.  Close the enum (for C). */
--- a/xen/include/xen/lib/x86/cpuid.h
+++ b/xen/include/xen/lib/x86/cpuid.h
@@ -16,6 +16,7 @@
 #define FEATURESET_7d0    9 /* 0x00000007:0.edx    */
 #define FEATURESET_7a1   10 /* 0x00000007:1.eax    */
 #define FEATURESET_e21a  11 /* 0x80000021.eax      */
+#define FEATURESET_7b1   12 /* 0x00000007:1.ebx    */
 
 struct cpuid_leaf
 {
@@ -188,6 +189,10 @@ struct cpuid_policy
                 uint32_t _7a1;
                 struct { DECL_BITFIELD(7a1); };
             };
+            union {
+                uint32_t _7b1;
+                struct { DECL_BITFIELD(7b1); };
+            };
         };
     } feat;
 
@@ -327,6 +332,7 @@ static inline void cpuid_policy_to_featu
     fs[FEATURESET_7d0] = p->feat._7d0;
     fs[FEATURESET_7a1] = p->feat._7a1;
     fs[FEATURESET_e21a] = p->extd.e21a;
+    fs[FEATURESET_7b1] = p->feat._7b1;
 }
 
 /* Fill in a CPUID policy from a featureset bitmap. */
@@ -345,6 +351,7 @@ static inline void cpuid_featureset_to_p
     p->feat._7d0  = fs[FEATURESET_7d0];
     p->feat._7a1  = fs[FEATURESET_7a1];
     p->extd.e21a  = fs[FEATURESET_e21a];
+    p->feat._7b1  = fs[FEATURESET_7b1];
 }
 
 static inline uint64_t cpuid_policy_xcr0_max(const struct cpuid_policy *p)
