[RFC PATCH 03/11] x86: Add x86_vendor_is() by itself before using it

This function is meant to replace all instances of the following
patterns in CPU policies and boot_cpu_data:
- x->x86_vendor == X86_VENDOR_FOO
- x->x86_vendor != X86_VENDOR_FOO
- x->x86_vendor & (X86_VENDOR_FOO | X86_VENDOR_BAR)
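
As an illustration (hypothetical call site, not part of this patch;
per the helper's signature below the runtime value goes in `candidate`
and the constant mask in `vendor`), an open-coded check such as:

    if ( boot_cpu_data.x86_vendor & (X86_VENDOR_FOO | X86_VENDOR_BAR) )
        ...

would then read:

    if ( x86_vendor_is(boot_cpu_data.x86_vendor,
                       X86_VENDOR_FOO | X86_VENDOR_BAR) )
        ...
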
The secret sauce is that all branches inside the helper resolve at
compile time. In the all-vendors-compiled-in case the function
generates code equivalent to the open-coded checks, and DCE becomes
progressively more aggressive as vendors are disabled. Once the
fallback CPU vendor setting is removed as well, the function folds
into a constant.
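
For example (a sketch of the expected folding, not generated code; it
assumes the vendor constants are single bits, as implied by the
X86_ENABLED_VENDORS mask and the ISOLATE_LSB() checks below), in a
build with AMD as the only enabled vendor and CONFIG_UNKNOWN_CPU
disabled, X86_ENABLED_VENDORS == X86_VENDOR_AMD and the single-vendor
branch is taken, so:

    x86_vendor_is(c, X86_VENDOR_AMD);   /* folds to true  */
    x86_vendor_is(c, X86_VENDOR_INTEL); /* folds to false */

independently of the runtime value of c, relying on the runtime vendor
check at CPUID probing time to guarantee the hardware really is AMD.
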
While at it, move an out-of-place include so that the includes sort
alphabetically.
Not a functional change.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
xen/arch/x86/include/asm/cpuid.h | 49 +++++++++++++++++++++++++++++++-
1 file changed, 48 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/include/asm/cpuid.h b/xen/arch/x86/include/asm/cpuid.h
index bf1c635cdd..a4280d1b0d 100644
--- a/xen/arch/x86/include/asm/cpuid.h
+++ b/xen/arch/x86/include/asm/cpuid.h
@@ -2,10 +2,12 @@
#define __X86_CPUID_H__
#include <asm/cpufeatureset.h>
+#include <asm/x86-vendors.h>
-#include <xen/types.h>
+#include <xen/compiler.h>
#include <xen/kernel.h>
#include <xen/percpu.h>
+#include <xen/types.h>
#include <public/sysctl.h>
@@ -56,6 +58,51 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
(IS_ENABLED(CONFIG_SHANGHAI) ? X86_VENDOR_SHANGHAI : 0) | \
(IS_ENABLED(CONFIG_HYGON) ? X86_VENDOR_HYGON : 0))
+/*
+ * When compiling Xen for a single vendor with no fallback vendor there's no
+ * need to check the candidate. `vendor` is always a compile-time constant,
+ * which means all of this can fold into a constant boolean.
+ *
+ * A runtime check at the time of CPUID probing guarantees we never run on
+ * wrong hardware and another check when loading CPU policies guarantees we
+ * never run policies for a vendor in another vendor's silicon.
+ *
+ * By the same token, the same folding can happen when no vendor is compiled
+ * in and the fallback path is present.
+ */
+static always_inline bool x86_vendor_is(uint8_t candidate, uint8_t vendor)
+{
+    uint8_t filtered_vendor = vendor & X86_ENABLED_VENDORS;
+
+    if ( vendor == X86_VENDOR_UNKNOWN )
+    {
+        if ( IS_ENABLED(CONFIG_UNKNOWN_CPU) )
+            /* no-vendor optimisation */
+            return X86_ENABLED_VENDORS ? vendor == candidate : true;
+
+        /* unknown-vendor-elimination optimisation */
+        return false;
+    }
+
+    /* single-vendor optimisation */
+    if ( !IS_ENABLED(CONFIG_UNKNOWN_CPU) &&
+         (ISOLATE_LSB(X86_ENABLED_VENDORS) == X86_ENABLED_VENDORS) )
+        return filtered_vendor == X86_ENABLED_VENDORS;
+
+    /* compiled-out-vendor-elimination optimisation */
+    if ( !filtered_vendor )
+        return false;
+
+    /*
+     * When checking against a single vendor, perform an equality check, as
+     * it yields (marginally) better codegen.
+     */
+    if ( ISOLATE_LSB(filtered_vendor) == filtered_vendor )
+        return filtered_vendor == candidate;
+
+    return filtered_vendor & candidate;
+}
+
#endif /* !__X86_CPUID_H__ */
/*
--
2.43.0