|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 2/3] x86/svm: add EFER SVME support for VGIF/VLOAD
On 31/01/18 20:35, Brian Woods wrote:
> Only enable virtual VMLOAD/SAVE and VGIF if the guest EFER.SVME is set.
>
> Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Signed-off-by: Brian Woods <brian.woods@xxxxxxx>
> ---
> xen/arch/x86/hvm/svm/svm.c | 69
> +++++++++++++++++++++++++++++++++++++++++++++
> xen/arch/x86/hvm/svm/vmcb.c | 17 -----------
> 2 files changed, 69 insertions(+), 17 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index c48fdfaa5d..7864ee39ae 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -601,6 +601,73 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
> }
> }
>
> +/*
> + * This runs on EFER change to see if nested features need to either be
> + * turned off or on.
> + */
> +static void svm_nested_features_on_efer_update(struct vcpu *v)
> +{
> + struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
> + struct nestedsvm *svm = &vcpu_nestedsvm(v);
> + u32 general2_intercepts;
> + vintr_t vintr;
> +
> + /*
> + * Need state for transferring the nested gif status so only write on
> + * the hvm_vcpu EFER.SVME changing.
> + */
The indentation is off, but that can be fixed up on commit.
> + if ( (v->arch.hvm_vcpu.guest_efer & EFER_SVME) &&
> + nestedhvm_enabled(v->domain))
> + {
> + if ( (vmcb->virt_ext.fields.vloadsave_enable == 0) &&
> + paging_mode_hap(v->domain) &&
> + cpu_has_svm_vloadsave )
> + {
> + vmcb->virt_ext.fields.vloadsave_enable = 1;
> + general2_intercepts = vmcb_get_general2_intercepts(vmcb);
> + general2_intercepts &= ~(GENERAL2_INTERCEPT_VMLOAD |
> + GENERAL2_INTERCEPT_VMSAVE);
> + vmcb_set_general2_intercepts(vmcb, general2_intercepts);
> + }
> +
> + if ( (vmcb->_vintr.fields.vgif_enable == 0) &&
> + cpu_has_svm_vgif )
> + {
> + vintr = vmcb_get_vintr(vmcb);
> + vintr.fields.vgif = svm->ns_gif;
> + vintr.fields.vgif_enable = 1;
> + vmcb_set_vintr(vmcb, vintr);
> + general2_intercepts = vmcb_get_general2_intercepts(vmcb);
> + general2_intercepts &= ~(GENERAL2_INTERCEPT_STGI |
> + GENERAL2_INTERCEPT_CLGI);
> + vmcb_set_general2_intercepts(vmcb, general2_intercepts);
> + }
> + }
> + else
> + {
> + if ( vmcb->virt_ext.fields.vloadsave_enable == 1 )
> + {
> + vmcb->virt_ext.fields.vloadsave_enable = 0;
> + general2_intercepts = vmcb_get_general2_intercepts(vmcb);
> + general2_intercepts |= (GENERAL2_INTERCEPT_VMLOAD |
> + GENERAL2_INTERCEPT_VMSAVE);
> + vmcb_set_general2_intercepts(vmcb, general2_intercepts);
> + }
> +
> + if ( vmcb->_vintr.fields.vgif_enable == 1 )
> + {
> + vintr = vmcb_get_vintr(vmcb);
> + svm->ns_gif = vintr.fields.vgif;
> + vintr.fields.vgif_enable = 0;
> + vmcb_set_vintr(vmcb, vintr);
> + general2_intercepts = vmcb_get_general2_intercepts(vmcb);
> + general2_intercepts |= (GENERAL2_INTERCEPT_STGI |
> + GENERAL2_INTERCEPT_CLGI);
> + vmcb_set_general2_intercepts(vmcb, general2_intercepts);
> + }
> + }
> +}
> +
As some extra cleanup, what about folding this diff in? It avoids
repeatedly dirtying the VMCB clean bits through the get/set accessors
(each field is now read once and written back at most once), and is
clearer to follow IMO.
~Andrew
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index e750bed..a0f8e76 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -609,63 +609,58 @@ static void
svm_nested_features_on_efer_update(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- u32 general2_intercepts;
- vintr_t vintr;
-
- /*
- * Need state for transferring the nested gif status so only write on
- * the hvm_vcpu EFER.SVME changing.
- */
- if ( (v->arch.hvm_vcpu.guest_efer & EFER_SVME) &&
- nestedhvm_enabled(v->domain))
- {
- if ( (vmcb->virt_ext.fields.vloadsave_enable == 0) &&
- paging_mode_hap(v->domain) &&
- cpu_has_svm_vloadsave )
+ uint32_t general2_intercepts = vmcb_get_general2_intercepts(vmcb);
+ uint32_t orig_general2_intercepts = general2_intercepts;
+ vintr_t vintr = vmcb_get_vintr(vmcb), orig_vintr = vintr;
+
+ if ( !nestedhvm_enabled(v->domain) )
+ ASSERT(!(v->arch.hvm_vcpu.guest_efer & EFER_SVME));
+
+ /*
+ * Need state for transferring the nested gif status so only write on
+ * the hvm_vcpu EFER.SVME changing.
+ */
+ if ( v->arch.hvm_vcpu.guest_efer & EFER_SVME )
+ {
+ if ( !vmcb->virt_ext.fields.vloadsave_enable &&
+ paging_mode_hap(v->domain) && cpu_has_svm_vloadsave )
{
vmcb->virt_ext.fields.vloadsave_enable = 1;
- general2_intercepts = vmcb_get_general2_intercepts(vmcb);
general2_intercepts &= ~(GENERAL2_INTERCEPT_VMLOAD |
GENERAL2_INTERCEPT_VMSAVE);
- vmcb_set_general2_intercepts(vmcb, general2_intercepts);
}
- if ( (vmcb->_vintr.fields.vgif_enable == 0) &&
- cpu_has_svm_vgif )
+ if ( !vintr.fields.vgif_enable && cpu_has_svm_vgif )
{
- vintr = vmcb_get_vintr(vmcb);
vintr.fields.vgif = svm->ns_gif;
vintr.fields.vgif_enable = 1;
- vmcb_set_vintr(vmcb, vintr);
- general2_intercepts = vmcb_get_general2_intercepts(vmcb);
general2_intercepts &= ~(GENERAL2_INTERCEPT_STGI |
GENERAL2_INTERCEPT_CLGI);
- vmcb_set_general2_intercepts(vmcb, general2_intercepts);
}
}
else
{
- if ( vmcb->virt_ext.fields.vloadsave_enable == 1 )
+ if ( vmcb->virt_ext.fields.vloadsave_enable )
{
vmcb->virt_ext.fields.vloadsave_enable = 0;
- general2_intercepts = vmcb_get_general2_intercepts(vmcb);
general2_intercepts |= (GENERAL2_INTERCEPT_VMLOAD |
GENERAL2_INTERCEPT_VMSAVE);
- vmcb_set_general2_intercepts(vmcb, general2_intercepts);
}
- if ( vmcb->_vintr.fields.vgif_enable == 1 )
+ if ( vintr.fields.vgif_enable )
{
- vintr = vmcb_get_vintr(vmcb);
svm->ns_gif = vintr.fields.vgif;
vintr.fields.vgif_enable = 0;
- vmcb_set_vintr(vmcb, vintr);
- general2_intercepts = vmcb_get_general2_intercepts(vmcb);
general2_intercepts |= (GENERAL2_INTERCEPT_STGI |
GENERAL2_INTERCEPT_CLGI);
- vmcb_set_general2_intercepts(vmcb, general2_intercepts);
}
}
+
+ if ( general2_intercepts != orig_general2_intercepts )
+ vmcb_set_general2_intercepts(vmcb, general2_intercepts);
+
+ if ( vintr.bytes != orig_vintr.bytes )
+ vmcb_set_vintr(vmcb, vintr);
}
static void svm_update_guest_efer(struct vcpu *v)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |