[PATCH v4 01/11] x86/HVM: improve CET-IBT pruning of ENDBR
__init{const,data}_cf_clobber can have an effect only for pointers
actually populated in the respective tables. While that is not the case
for SVM right now, VMX installs a number of pointers only under certain
conditions, and hence the respective functions would have their ENDBR
purged only when those conditions are met. Invoke the new "fill"
functions after having copied the respective tables, so that they can
install any "missing" pointers.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
This is largely cosmetic for present hardware, which, when supporting
CET-IBT, likely also supports all of the advanced VMX features for which
hook pointers are installed conditionally. The only case where this makes
a difference on such hardware is when use of the respective features was
suppressed via a command line option (where available). For future hooks
it may end up relevant even by default, and it would also become relevant
if AMD started supporting CET-IBT; right now it matters only for
.pi_update_irte, as iommu_intpost continues to default to off.
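
For illustration only, a minimal stand-alone sketch of why the fill step
matters. The hook table, function names, and scan loop below are made up
and are not Xen's actual __init{const,data}_cf_clobber implementation;
whether the targets really start with ENDBR64 depends on building with
-fcf-protection=branch. The point is simply that a pruning pass can act
only on slots which actually hold a pointer, while a slot left at NULL
lets the would-be target keep its ENDBR.

/*
 * Illustration only -- not Xen code.  A cf_clobber-style scan inspects
 * table slots; a NULL slot gives it nothing to act on.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical hook table; "conditional" mimics a hook installed only
 * when some feature is enabled. */
struct hooks {
    void (*always)(void);
    void (*conditional)(void);
};

static void always_fn(void) { }
static void conditional_fn(void) { }

static const uint8_t endbr64[] = { 0xf3, 0x0f, 0x1e, 0xfa };

static void scan(const struct hooks *t, const char *what)
{
    const void *slots[] = { (const void *)t->always,
                            (const void *)t->conditional };
    unsigned int i;

    printf("%s:\n", what);
    for ( i = 0; i < sizeof(slots) / sizeof(slots[0]); i++ )
    {
        if ( !slots[i] )
            printf("  slot %u: NULL, nothing to prune\n", i);
        else if ( !memcmp(slots[i], endbr64, sizeof(endbr64)) )
            printf("  slot %u: ENDBR64 found, candidate for clobbering\n", i);
        else
            printf("  slot %u: no ENDBR64 at target\n", i);
    }
}

int main(void)
{
    /* Without a fill step: the conditional hook was never installed. */
    struct hooks partial = { .always = always_fn };
    /* With a fill step: every slot is populated. */
    struct hooks filled = { .always = always_fn,
                            .conditional = conditional_fn };

    scan(&partial, "partial table");
    scan(&filled, "filled table");
    return 0;
}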
Originally I had meant to put the SVM and VMX functions in presmp-
initcalls, but hvm/{svm,vmx}/built_in.o are linked into hvm/built_in.o
before hvm/hvm.o. And I don't think I want to fiddle with link order
here.
---
v4: Rename functions. Re-base.
v3: Re-base.
v2: Use cpu_has_xen_ibt in {svm,vmx}_fill_funcs().
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -160,10 +160,17 @@ static int __init cf_check hvm_enable(void)
     else if ( using_svm() )
         fns = start_svm();
 
+    if ( fns )
+        hvm_funcs = *fns;
+
+    if ( IS_ENABLED(CONFIG_INTEL_VMX) )
+        vmx_fill_funcs();
+    if ( IS_ENABLED(CONFIG_AMD_SVM) )
+        svm_fill_funcs();
+
     if ( fns == NULL )
         return 0;
 
-    hvm_funcs = *fns;
     hvm_enabled = 1;
 
     printk("HVM: %s enabled\n", fns->name);
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2530,6 +2530,19 @@ const struct hvm_function_table * __init start_svm(void)
     return &svm_function_table;
 }
 
+void __init svm_fill_funcs(void)
+{
+    /*
+     * Now that svm_function_table was copied, populate all function pointers
+     * which may have been left at NULL, for __initdata_cf_clobber to have as
+     * much of an effect as possible.
+     */
+    if ( !cpu_has_xen_ibt )
+        return;
+
+    /* Nothing at present. */
+}
+
 void asmlinkage svm_vmexit_handler(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3064,6 +3064,30 @@ const struct hvm_function_table * __init start_vmx(void)
     return &vmx_function_table;
 }
 
+void __init vmx_fill_funcs(void)
+{
+    /*
+     * Now that vmx_function_table was copied, populate all function pointers
+     * which may have been left at NULL, for __initdata_cf_clobber to have as
+     * much of an effect as possible.
+     */
+    if ( !cpu_has_xen_ibt )
+        return;
+
+    vmx_function_table.set_descriptor_access_exiting =
+        vmx_set_descriptor_access_exiting;
+
+    vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
+    vmx_function_table.process_isr = vmx_process_isr;
+    vmx_function_table.handle_eoi = vmx_handle_eoi;
+
+    vmx_function_table.pi_update_irte = vmx_pi_update_irte;
+
+    vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
+    vmx_function_table.sync_pir_to_irr = vmx_sync_pir_to_irr;
+    vmx_function_table.test_pir = vmx_test_pir;
+}
+
 /*
  * Not all cases receive valid value in the VM-exit instruction length field.
  * Callers must know what they're doing!
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -260,6 +260,9 @@ extern int8_t hvm_port80_allowed;
 extern const struct hvm_function_table *start_svm(void);
 extern const struct hvm_function_table *start_vmx(void);
 
+void svm_fill_funcs(void);
+void vmx_fill_funcs(void);
+
 int hvm_domain_initialise(struct domain *d,
                           const struct xen_domctl_createdomain *config);
 void hvm_domain_relinquish_resources(struct domain *d);
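
One detail of the hvm_enable() ordering above may be worth spelling out:
hvm_funcs is copied before the fill functions run, so the runtime copy
keeps NULL for conditionally installed hooks, while the static, init-time
vendor table ends up fully populated for the pruning pass. Below is a
minimal stand-alone sketch of that ordering; the structure and names are
made up for illustration and are not the Xen ones.

#include <stdio.h>

/* Hypothetical stand-ins for a vendor's __initdata hook table and the
 * runtime copy that callers actually use. */
struct fn_table {
    void (*feature_hook)(void);   /* installed only when a feature is on */
};

static void feature_hook_impl(void) { }

static struct fn_table vendor_table;   /* feature off: hook left at NULL */
static struct fn_table runtime_funcs;

int main(void)
{
    /* 1) Copy first: the runtime copy reflects the conditional setup. */
    runtime_funcs = vendor_table;

    /* 2) Fill afterwards: the init-time table gets every pointer, so a
     * cf_clobber-style scan over it sees (and can prune) each target. */
    vendor_table.feature_hook = feature_hook_impl;

    printf("runtime hook:   %s\n",
           runtime_funcs.feature_hook ? "present" : "absent");
    printf("init-time hook: %s\n",
           vendor_table.feature_hook ? "present" : "absent");
    return 0;
}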