[Xen-devel] [PATCH v2 05/10] x86: Implement Intel Processor Trace context switch
Load/restore the Intel Processor Trace registers on context switch.

MSR IA32_RTIT_CTL is loaded/stored automatically from/to the VMCS.
When Intel Processor Trace is supported in the guest, the remaining
trace MSRs only need to be loaded/restored while the feature is
enabled in the guest.
Signed-off-by: Luwei Kang <luwei.kang@xxxxxxxxx>
---
xen/arch/x86/cpu/ipt.c | 101 +++++++++++++++++++++++++++++++++++++
xen/arch/x86/hvm/vmx/vmx.c | 10 ++++
xen/include/asm-x86/hvm/vmx/vmcs.h | 3 ++
xen/include/asm-x86/ipt.h | 23 +++++++++
4 files changed, 137 insertions(+)
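
[Reviewer note, not part of the patch: below is a minimal, self-contained C
sketch of the conditional save/restore pattern that ipt_guest_enter() and
ipt_guest_exit() implement in this series. The guest_enter()/guest_exit()
wrappers, the load_to_hw()/save_from_hw() helpers and the fake "hw" state are
invented purely for illustration so the sketch builds and runs outside Xen;
the real code uses rdmsrl()/wrmsrl() on the IA32_RTIT_* MSRs and a per-vCPU
struct ipt_ctx with a flexible addr[] array.]

/* Illustrative only; builds stand-alone: cc -std=c99 -Wall ipt_sketch.c */
#include <stdint.h>
#include <stdio.h>

#define RTIT_CTL_TRACEEN  (1ULL << 0)   /* TraceEn bit of IA32_RTIT_CTL */

/* Trimmed-down per-vCPU trace context, mirroring struct ipt_ctx below. */
struct ipt_ctx {
    uint64_t ctl, status, output_base, output_mask, cr3_match;
};

/* Fake "hardware" standing in for the RTIT MSRs (rdmsrl()/wrmsrl()). */
static struct ipt_ctx hw;
static void load_to_hw(const struct ipt_ctx *c) { hw = *c; }
static void save_from_hw(struct ipt_ctx *c)     { *c = hw; }

/* Restore the guest trace MSRs before VM entry, only if tracing is on. */
static void guest_enter(const struct ipt_ctx *guest)
{
    if ( guest->ctl & RTIT_CTL_TRACEEN )
        load_to_hw(guest);
}

/* Save the guest trace MSRs right after VM exit, under the same check. */
static void guest_exit(struct ipt_ctx *guest)
{
    if ( guest->ctl & RTIT_CTL_TRACEEN )
        save_from_hw(guest);
}

int main(void)
{
    struct ipt_ctx guest = { .ctl = RTIT_CTL_TRACEEN, .output_base = 0x1000 };

    guest_enter(&guest);   /* guest context loaded onto the "CPU" */
    hw.status = 0x2;       /* the guest runs and updates trace state */
    guest_exit(&guest);    /* updated state captured back into the vCPU */

    printf("saved status = %#llx\n", (unsigned long long)guest.status);
    return 0;
}

The TraceEn check is the key point: IA32_RTIT_CTL itself travels via the
VMCS, so the manual MSR save/restore can be skipped entirely whenever
tracing is disabled in the guest.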
diff --git a/xen/arch/x86/cpu/ipt.c b/xen/arch/x86/cpu/ipt.c
index 1fd7f51..b81a155 100644
--- a/xen/arch/x86/cpu/ipt.c
+++ b/xen/arch/x86/cpu/ipt.c
@@ -21,7 +21,9 @@
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/string.h>
+#include <asm/hvm/vmx/vmx.h>
#include <asm/ipt.h>
+#include <asm/msr.h>
/* ipt: Flag to enable Intel Processor Trace (default off). */
unsigned int __read_mostly ipt_mode = IPT_MODE_OFF;
@@ -40,3 +42,102 @@ static int __init parse_ipt_params(const char *str)
return 0;
}
+
+static inline void ipt_load_msr(const struct ipt_ctx *ctx,
+                                unsigned int addr_range)
+{
+    unsigned int i;
+
+    wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+    wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+    wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+    wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+    for ( i = 0; i < addr_range; i++ )
+    {
+        wrmsrl(MSR_IA32_RTIT_ADDR_A(i), ctx->addr[i * 2]);
+        wrmsrl(MSR_IA32_RTIT_ADDR_B(i), ctx->addr[i * 2 + 1]);
+    }
+}
+
+static inline void ipt_save_msr(struct ipt_ctx *ctx, unsigned int addr_range)
+{
+    unsigned int i;
+
+    rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+    rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+    rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+    rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+    for ( i = 0; i < addr_range; i++ )
+    {
+        rdmsrl(MSR_IA32_RTIT_ADDR_A(i), ctx->addr[i * 2]);
+        rdmsrl(MSR_IA32_RTIT_ADDR_B(i), ctx->addr[i * 2 + 1]);
+    }
+}
+
+void ipt_guest_enter(struct vcpu *v)
+{
+    struct ipt_desc *ipt = v->arch.hvm_vmx.ipt_desc;
+
+    if ( !ipt )
+        return;
+
+    /*
+     * The guest value of IA32_RTIT_CTL needs to be re-written into the
+     * VMCS in case this vCPU has been scheduled onto another physical CPU.
+     * TBD: performance optimization. Add a field to struct ipt_desc to
+     * record the last pCPU, and only do this when the vCPU has actually
+     * moved to a different pCPU (as vpmu does).
+     */
+    vmx_vmcs_enter(v);
+    __vmwrite(GUEST_IA32_RTIT_CTL, ipt->ipt_guest.ctl);
+    vmx_vmcs_exit(v);
+
+    if ( ipt->ipt_guest.ctl & RTIT_CTL_TRACEEN )
+        ipt_load_msr(&ipt->ipt_guest, ipt->addr_range);
+}
+
+void ipt_guest_exit(struct vcpu *v)
+{
+    struct ipt_desc *ipt = v->arch.hvm_vmx.ipt_desc;
+
+    if ( !ipt )
+        return;
+
+    if ( ipt->ipt_guest.ctl & RTIT_CTL_TRACEEN )
+        ipt_save_msr(&ipt->ipt_guest, ipt->addr_range);
+}
+
+int ipt_initialize(struct vcpu *v)
+{
+    struct ipt_desc *ipt = NULL;
+    unsigned int eax, tmp, addr_range;
+
+    if ( !cpu_has_ipt || (ipt_mode == IPT_MODE_OFF) ||
+         !(v->arch.hvm_vmx.secondary_exec_control & SECONDARY_EXEC_PT_USE_GPA) )
+        return 0;
+
+    if ( cpuid_eax(IPT_CPUID) == 0 )
+        return -EINVAL;
+
+    cpuid_count(IPT_CPUID, 1, &eax, &tmp, &tmp, &tmp);
+    addr_range = eax & IPT_ADDR_RANGE_MASK;
+    ipt = _xzalloc(sizeof(struct ipt_desc) + sizeof(uint64_t) * addr_range * 2,
+                   __alignof(*ipt));
+    if ( !ipt )
+        return -ENOMEM;
+
+    ipt->addr_range = addr_range;
+    ipt->ipt_guest.output_mask = RTIT_OUTPUT_MASK_DEFAULT;
+    v->arch.hvm_vmx.ipt_desc = ipt;
+
+    return 0;
+}
+
+void ipt_destroy(struct vcpu *v)
+{
+    if ( v->arch.hvm_vmx.ipt_desc )
+    {
+        xfree(v->arch.hvm_vmx.ipt_desc);
+        v->arch.hvm_vmx.ipt_desc = NULL;
+    }
+}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 9707514..060ab65 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -55,6 +55,7 @@
#include <asm/hvm/nestedhvm.h>
#include <asm/altp2m.h>
#include <asm/event.h>
+#include <asm/ipt.h>
#include <asm/mce.h>
#include <asm/monitor.h>
#include <public/arch-x86/cpuid.h>
@@ -466,11 +467,16 @@ static int vmx_vcpu_initialise(struct vcpu *v)
if ( v->vcpu_id == 0 )
v->arch.user_regs.rax = 1;
+    rc = ipt_initialize(v);
+    if ( rc )
+        dprintk(XENLOG_ERR, "%pv: Failed to init Intel Processor Trace.\n", v);
+
return 0;
}
static void vmx_vcpu_destroy(struct vcpu *v)
{
+    ipt_destroy(v);
/*
* There are cases that domain still remains in log-dirty mode when it is
* about to be destroyed (ex, user types 'xl destroy <dom>'), in which case
@@ -3508,6 +3514,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
__vmread(GUEST_RSP, &regs->rsp);
__vmread(GUEST_RFLAGS, &regs->rflags);
+    ipt_guest_exit(v);
+
hvm_invalidate_regs_fields(regs);
if ( paging_mode_hap(v->domain) )
@@ -4281,6 +4289,8 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
}
out:
+    ipt_guest_enter(curr);
+
if ( unlikely(curr->arch.hvm_vmx.lbr_fixup_enabled) )
lbr_fixup();
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 2990992..2388e27 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -174,6 +174,8 @@ struct arch_vmx_struct {
* pCPU and wakeup the related vCPU.
*/
struct pi_blocking_vcpu pi_blocking;
+
+    struct ipt_desc *ipt_desc;
};
int vmx_create_vmcs(struct vcpu *v);
@@ -421,6 +423,7 @@ enum vmcs_field {
GUEST_PDPTE0 = 0x0000280a,
#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */
GUEST_BNDCFGS = 0x00002812,
+    GUEST_IA32_RTIT_CTL = 0x00002814,
HOST_PAT = 0x00002c00,
HOST_EFER = 0x00002c02,
HOST_PERF_GLOBAL_CTRL = 0x00002c04,
diff --git a/xen/include/asm-x86/ipt.h b/xen/include/asm-x86/ipt.h
index 65b064c..a69f049 100644
--- a/xen/include/asm-x86/ipt.h
+++ b/xen/include/asm-x86/ipt.h
@@ -26,6 +26,29 @@
#define IPT_CPUID 0x00000014
+#define IPT_ADDR_RANGE_MASK 0x00000007
+#define RTIT_OUTPUT_MASK_DEFAULT 0x0000007f
+
extern unsigned int ipt_mode;
+struct ipt_ctx {
+    uint64_t ctl;
+    uint64_t status;
+    uint64_t output_base;
+    uint64_t output_mask;
+    uint64_t cr3_match;
+    uint64_t addr[0];
+};
+
+struct ipt_desc {
+    unsigned int addr_range;
+    struct ipt_ctx ipt_guest;
+};
+
+extern void ipt_guest_enter(struct vcpu *v);
+extern void ipt_guest_exit(struct vcpu *v);
+
+extern int ipt_initialize(struct vcpu *v);
+extern void ipt_destroy(struct vcpu *v);
+
#endif /* __ASM_X86_HVM_IPT_H_ */
--
1.8.3.1