[Xen-devel] [PATCH RFC 2/9] xen: Optimize introspection access to guest state
Speed optimization for introspection purposes: a handful of registers
are sent along with each mem_event. This requires enlargement of the
mem_event_request / mem_event_response structures, and additional code
to fill in relevant values.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
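For ring consumers this means the register state arrives with the event
itself, with no extra hypercall. A minimal sketch of the consumer side,
modelled on the back-ring handling in tools/tests/xen-access (ring setup
and error handling elided; the helper name is illustrative):

    #include <string.h>
    #include <xenctrl.h>
    #include <xen/mem_event.h>   /* mem_event_request_t, back ring types */

    static void get_request(mem_event_back_ring_t *back_ring,
                            mem_event_request_t *req)
    {
        RING_IDX req_cons = back_ring->req_cons;

        /* Copy the request off the shared ring; regs travels with it. */
        memcpy(req, RING_GET_REQUEST(back_ring, req_cons), sizeof(*req));
        req_cons++;

        /* Publish the new consumer index to the producer. */
        back_ring->req_cons = req_cons;
        back_ring->sring->req_event = req_cons + 1;
    }

After that, req->regs.rip, req->regs.cr3 and so on are directly usable.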
xen/arch/x86/hvm/hvm.c | 33 +++++++++++++++++
xen/arch/x86/hvm/vmx/vmx.c | 1 +
xen/arch/x86/mm/p2m.c | 61 ++++++++++++++++++++++++++++++++
xen/include/public/arch-x86/hvm/save.h | 4 +++
xen/include/public/mem_event.h | 36 +++++++++++++++++++
5 files changed, 135 insertions(+)
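For comparison, today an introspection application pays a hypercall
round-trip per event to recover the same state, roughly as in the sketch
below (using the existing libxc call xc_vcpu_getcontext(); error handling
elided, 64-bit guest assumed, helper name illustrative):

    #include <xenctrl.h>

    static uint64_t get_rip_via_hypercall(xc_interface *xch, uint32_t domid,
                                          const mem_event_request_t *req)
    {
        vcpu_guest_context_any_t ctxt;

        /* One hypercall per delivered event -- the cost the fast path avoids. */
        xc_vcpu_getcontext(xch, domid, req->vcpu_id, &ctxt);
        return ctxt.x64.user_regs.rip;
    }

The new guest_x86_mode field (filled from hvm_guest_x86_mode() on the
mem_access path; it returns 0 for real mode, 1 for vm86, 2 and 4 for 16-
and 32-bit protected mode, and 8 for long mode) tells the consumer which
interpretation of the saved registers applies.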
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 17ff011..f65a5f5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6016,6 +6016,38 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
     return rc;
 }
 
+static inline void hvm_mem_event_fill_regs(mem_event_request_t *req)
+{
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    struct vcpu *v = current;
+
+    req->regs.rax = regs->eax;
+    req->regs.rcx = regs->ecx;
+    req->regs.rdx = regs->edx;
+    req->regs.rbx = regs->ebx;
+    req->regs.rsp = regs->esp;
+    req->regs.rbp = regs->ebp;
+    req->regs.rsi = regs->esi;
+    req->regs.rdi = regs->edi;
+
+    req->regs.r8 = regs->r8;
+    req->regs.r9 = regs->r9;
+    req->regs.r10 = regs->r10;
+    req->regs.r11 = regs->r11;
+    req->regs.r12 = regs->r12;
+    req->regs.r13 = regs->r13;
+    req->regs.r14 = regs->r14;
+    req->regs.r15 = regs->r15;
+
+    req->regs.rflags = regs->eflags;
+    req->regs.rip = regs->eip;
+
+    req->regs.msr_efer = v->arch.hvm_vcpu.guest_efer;
+    req->regs.cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    req->regs.cr3 = v->arch.hvm_vcpu.guest_cr[3];
+    req->regs.cr4 = v->arch.hvm_vcpu.guest_cr[4];
+}
+
 static int hvm_memory_event_traps(long p, uint32_t reason,
                                   unsigned long value, unsigned long old,
                                   bool_t gla_valid, unsigned long gla)
@@ -6060,6 +6092,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
         req.gla = old;
     }
 
+    hvm_mem_event_fill_regs(&req);
     mem_event_put_request(d, &d->mem_event->access, &req);
 
     return 1;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2caa04a..fed21b6 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -425,6 +425,7 @@ static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
     c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
 
     c->msr_efer = v->arch.hvm_vcpu.guest_efer;
+    c->guest_x86_mode = vmx_guest_x86_mode(v);
 
     __vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
     __vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 642ec28..93252d9 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1314,6 +1314,64 @@ void p2m_mem_paging_resume(struct domain *d)
     }
 }
 
+static inline void p2m_mem_event_fill_regs(mem_event_request_t *req)
+{
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    struct segment_register seg;
+    struct hvm_hw_cpu ctxt;
+    struct vcpu *v = current;
+
+    memset(&ctxt, 0, sizeof(struct hvm_hw_cpu));
+
+    /* Architecture-specific vmcs/vmcb bits */
+    hvm_funcs.save_cpu_ctxt(v, &ctxt);
+
+    req->regs.rax = regs->eax;
+    req->regs.rcx = regs->ecx;
+    req->regs.rdx = regs->edx;
+    req->regs.rbx = regs->ebx;
+    req->regs.rsp = regs->esp;
+    req->regs.rbp = regs->ebp;
+    req->regs.rsi = regs->esi;
+    req->regs.rdi = regs->edi;
+
+#ifdef __x86_64__
+    req->regs.r8 = regs->r8;
+    req->regs.r9 = regs->r9;
+    req->regs.r10 = regs->r10;
+    req->regs.r11 = regs->r11;
+    req->regs.r12 = regs->r12;
+    req->regs.r13 = regs->r13;
+    req->regs.r14 = regs->r14;
+    req->regs.r15 = regs->r15;
+#endif
+
+    req->regs.rflags = regs->eflags;
+    req->regs.rip = regs->eip;
+
+    req->regs.dr7 = v->arch.debugreg[7];
+    req->regs.cr0 = ctxt.cr0;
+    req->regs.cr2 = ctxt.cr2;
+    req->regs.cr3 = ctxt.cr3;
+    req->regs.cr4 = ctxt.cr4;
+
+    req->regs.sysenter_cs = ctxt.sysenter_cs;
+    req->regs.sysenter_esp = ctxt.sysenter_esp;
+    req->regs.sysenter_eip = ctxt.sysenter_eip;
+
+    req->regs.msr_efer = ctxt.msr_efer;
+    req->regs.msr_star = ctxt.msr_star;
+    req->regs.msr_lstar = ctxt.msr_lstar;
+
+    hvm_get_segment_register(v, x86_seg_fs, &seg);
+    req->regs.fs_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_gs, &seg);
+    req->regs.gs_base = seg.base;
+
+    req->regs.guest_x86_mode = hvm_guest_x86_mode(current);
+}
+
 bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
                             bool_t access_r, bool_t access_w, bool_t access_x,
                             mem_event_request_t **req_ptr)
@@ -1407,6 +1465,9 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
     if ( p2ma != p2m_access_n2rwx )
         vcpu_pause_nosync(v);
 
+    if ( req )
+        p2m_mem_event_fill_regs(req);
+
     /* VCPU may be paused, return whether we promoted automatically */
     return (p2ma == p2m_access_n2rwx);
 }
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 16d85a3..7b659ba 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -157,6 +157,8 @@ struct hvm_hw_cpu {
     };
     /* error code for pending event */
    uint32_t error_code;
+
+    int32_t guest_x86_mode;
 };
 
 struct hvm_hw_cpu_compat {
@@ -266,6 +268,8 @@ struct hvm_hw_cpu_compat {
     };
     /* error code for pending event */
     uint32_t error_code;
+
+    int32_t guest_x86_mode;
 };
 
 static inline int _hvm_hw_fix_cpu(void *h) {
diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
index 3831b41..24ac67d 100644
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -48,6 +48,41 @@
 #define MEM_EVENT_REASON_MSR         7    /* MSR was hit: gfn is MSR value, gla is MSR address;
                                              does NOT honour HVMPME_onchangeonly */
 
+typedef struct mem_event_regs_st {
+    uint64_t rax;
+    uint64_t rcx;
+    uint64_t rdx;
+    uint64_t rbx;
+    uint64_t rsp;
+    uint64_t rbp;
+    uint64_t rsi;
+    uint64_t rdi;
+    uint64_t r8;
+    uint64_t r9;
+    uint64_t r10;
+    uint64_t r11;
+    uint64_t r12;
+    uint64_t r13;
+    uint64_t r14;
+    uint64_t r15;
+    uint64_t rflags;
+    uint64_t dr7;
+    uint64_t rip;
+    uint64_t cr0;
+    uint64_t cr2;
+    uint64_t cr3;
+    uint64_t cr4;
+    uint64_t sysenter_cs;
+    uint64_t sysenter_esp;
+    uint64_t sysenter_eip;
+    uint64_t msr_efer;
+    uint64_t msr_star;
+    uint64_t msr_lstar;
+    uint64_t fs_base;
+    uint64_t gs_base;
+    int32_t guest_x86_mode;
+} mem_event_regs_t;
+
 typedef struct mem_event_st {
     uint32_t flags;
     uint32_t vcpu_id;
@@ -65,6 +100,7 @@ typedef struct mem_event_st {
     uint16_t available:12;
 
     uint16_t reason;
+    mem_event_regs_t regs;
 } mem_event_request_t, mem_event_response_t;
 
 DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);
--
1.7.9.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel