[Xen-devel] [RFC PATCH V2 8/8] x86/hvm: factor out vm_event related functions into separate file
To avoid growing hvm.c, these functions can be stored separately.
Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
xen/arch/x86/hvm/Makefile | 3 +-
xen/arch/x86/hvm/event.c | 195 ++++++++++++++++++++++++++++++++++++++++
xen/arch/x86/hvm/hvm.c | 163 +--------------------------------
xen/arch/x86/hvm/vmx/vmx.c | 1 +
xen/include/asm-x86/hvm/event.h | 40 +++++++++
xen/include/asm-x86/hvm/hvm.h | 11 ---
6 files changed, 239 insertions(+), 174 deletions(-)
create mode 100644 xen/arch/x86/hvm/event.c
create mode 100644 xen/include/asm-x86/hvm/event.h
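
[Note for reviewers, not part of the patch: a minimal sketch of how a CR-write path is expected to call the relocated helpers once they are reachable via asm/hvm/event.h. The wrapper function below is illustrative only; hvm_event_cr3() and the HVM_PARAM_MEMORY_EVENT_CR3 / HVMPME_onchangeonly gating it performs internally are what this patch moves into event.c.]

/* Illustrative caller -- not part of this patch. */
#include <xen/sched.h>
#include <asm/hvm/event.h>

static void example_set_guest_cr3(struct vcpu *v, unsigned long new_cr3)
{
    unsigned long old_cr3 = v->arch.hvm_vcpu.guest_cr[3];

    v->arch.hvm_vcpu.guest_cr[3] = new_cr3;

    /*
     * Notify a vm_event monitor, if one is listening.  The helper checks
     * HVM_PARAM_MEMORY_EVENT_CR3 and the HVMPME_onchangeonly flag itself,
     * so callers may invoke it unconditionally.
     */
    hvm_event_cr3(new_cr3, old_cr3);
}
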
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index eea5555..2389923 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -22,4 +22,5 @@ obj-y += vlapic.o
obj-y += vmsi.o
obj-y += vpic.o
obj-y += vpt.o
-obj-y += vpmu.o
\ No newline at end of file
+obj-y += vpmu.o
+obj-y += event.o
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
new file mode 100644
index 0000000..96d1748
--- /dev/null
+++ b/xen/arch/x86/hvm/event.c
@@ -0,0 +1,195 @@
+/*
+* event.c: Common hardware virtual machine event abstractions.
+*
+* Copyright (c) 2004, Intel Corporation.
+* Copyright (c) 2005, International Business Machines Corporation.
+* Copyright (c) 2008, Citrix Systems, Inc.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms and conditions of the GNU General Public License,
+* version 2, as published by the Free Software Foundation.
+*
+* This program is distributed in the hope it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+* Place - Suite 330, Boston, MA 02111-1307 USA.
+*/
+
+#include <xen/vm_event.h>
+#include <xen/paging.h>
+#include <public/vm_event.h>
+
+static void hvm_event_fill_regs(vm_event_request_t *req)
+{
+ const struct cpu_user_regs *regs = guest_cpu_user_regs();
+ const struct vcpu *curr = current;
+
+ req->regs.x86.rax = regs->eax;
+ req->regs.x86.rcx = regs->ecx;
+ req->regs.x86.rdx = regs->edx;
+ req->regs.x86.rbx = regs->ebx;
+ req->regs.x86.rsp = regs->esp;
+ req->regs.x86.rbp = regs->ebp;
+ req->regs.x86.rsi = regs->esi;
+ req->regs.x86.rdi = regs->edi;
+
+ req->regs.x86.r8 = regs->r8;
+ req->regs.x86.r9 = regs->r9;
+ req->regs.x86.r10 = regs->r10;
+ req->regs.x86.r11 = regs->r11;
+ req->regs.x86.r12 = regs->r12;
+ req->regs.x86.r13 = regs->r13;
+ req->regs.x86.r14 = regs->r14;
+ req->regs.x86.r15 = regs->r15;
+
+ req->regs.x86.rflags = regs->eflags;
+ req->regs.x86.rip = regs->eip;
+
+ req->regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
+ req->regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
+ req->regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
+ req->regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
+}
+
+static int hvm_event_traps(long parameters, vm_event_request_t *req)
+{
+ int rc;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
+
+ if ( !(parameters & HVMPME_MODE_MASK) )
+ return 0;
+
+ rc = vm_event_claim_slot(d, &d->vm_event->monitor);
+ if ( rc == -ENOSYS )
+ {
+ /* If there was no ring to handle the event, then
+ * simply continue executing normally. */
+ return 1;
+ }
+ else if ( rc < 0 )
+ return rc;
+
+ if ( (parameters & HVMPME_MODE_MASK) == HVMPME_mode_sync )
+ {
+ req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
+ vm_event_vcpu_pause(v);
+ }
+
+ hvm_event_fill_regs(req);
+ vm_event_put_request(d, &d->vm_event->monitor, req);
+
+ return 1;
+}
+
+void hvm_event_cr0(unsigned long value, unsigned long old)
+{
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_CR0,
+ .vcpu_id = current->vcpu_id,
+ .cr_event.new_value = value,
+ .cr_event.old_value = old
+ };
+
+ long parameters = current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_CR0];
+
+ if ( (parameters & HVMPME_onchangeonly) && (value == old) )
+ return;
+
+ hvm_event_traps(parameters, &req);
+}
+
+void hvm_event_cr3(unsigned long value, unsigned long old)
+{
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_CR3,
+ .vcpu_id = current->vcpu_id,
+ .cr_event.new_value = value,
+ .cr_event.old_value = old
+ };
+
+ long parameters = current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_CR3];
+
+ if ( (parameters & HVMPME_onchangeonly) && (value == old) )
+ return;
+
+ hvm_event_traps(parameters, &req);
+}
+
+void hvm_event_cr4(unsigned long value, unsigned long old)
+{
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_CR4,
+ .vcpu_id = current->vcpu_id,
+ .cr_event.new_value = value,
+ .cr_event.old_value = old
+ };
+
+ long parameters = current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_CR4];
+
+ if ( (parameters & HVMPME_onchangeonly) && (value == old) )
+ return;
+
+ hvm_event_traps(parameters, &req);
+}
+
+void hvm_event_msr(unsigned long msr, unsigned long value)
+{
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MSR,
+ .vcpu_id = current->vcpu_id,
+ .msr_event.msr = msr,
+ .msr_event.new_value = value,
+ };
+
+ hvm_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_MSR],
+ &req);
+}
+
+int hvm_event_int3(unsigned long gla)
+{
+ uint32_t pfec = PFEC_page_present;
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_INT3,
+ .vcpu_id = current->vcpu_id,
+ .int3_event.gla = gla,
+ .int3_event.gfn = paging_gva_to_gfn(current, gla, &pfec)
+ };
+
+ return hvm_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_INT3],
+ &req);
+}
+
+int hvm_event_single_step(unsigned long gla)
+{
+ uint32_t pfec = PFEC_page_present;
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_SINGLESTEP,
+ .vcpu_id = current->vcpu_id,
+ .singlestep_event.gla = gla,
+ .singlestep_event.gfn = paging_gva_to_gfn(current, gla, &pfec)
+ };
+
+ return hvm_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
+ &req);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 48ef545..7c17c5c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -35,7 +35,6 @@
#include <xen/paging.h>
#include <xen/cpu.h>
#include <xen/wait.h>
-#include <xen/vm_event.h>
#include <xen/mem_access.h>
#include <xen/rangeset.h>
#include <asm/shadow.h>
@@ -60,6 +59,7 @@
#include <asm/hvm/cacheattr.h>
#include <asm/hvm/trace.h>
#include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/event.h>
#include <asm/mtrr.h>
#include <asm/apic.h>
#include <public/sched.h>
@@ -6171,167 +6171,6 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
return rc;
}
-static void hvm_event_fill_regs(vm_event_request_t *req)
-{
- const struct cpu_user_regs *regs = guest_cpu_user_regs();
- const struct vcpu *curr = current;
-
- req->regs.x86.rax = regs->eax;
- req->regs.x86.rcx = regs->ecx;
- req->regs.x86.rdx = regs->edx;
- req->regs.x86.rbx = regs->ebx;
- req->regs.x86.rsp = regs->esp;
- req->regs.x86.rbp = regs->ebp;
- req->regs.x86.rsi = regs->esi;
- req->regs.x86.rdi = regs->edi;
-
- req->regs.x86.r8 = regs->r8;
- req->regs.x86.r9 = regs->r9;
- req->regs.x86.r10 = regs->r10;
- req->regs.x86.r11 = regs->r11;
- req->regs.x86.r12 = regs->r12;
- req->regs.x86.r13 = regs->r13;
- req->regs.x86.r14 = regs->r14;
- req->regs.x86.r15 = regs->r15;
-
- req->regs.x86.rflags = regs->eflags;
- req->regs.x86.rip = regs->eip;
-
- req->regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
- req->regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
- req->regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
- req->regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
-}
-
-static int hvm_event_traps(long parameters, vm_event_request_t *req)
-{
- int rc;
- struct vcpu *v = current;
- struct domain *d = v->domain;
-
- if ( !(parameters & HVMPME_MODE_MASK) )
- return 0;
-
- rc = vm_event_claim_slot(d, &d->vm_event->monitor);
- if ( rc == -ENOSYS )
- {
- /* If there was no ring to handle the event, then
- * simple continue executing normally. */
- return 1;
- }
- else if ( rc < 0 )
- return rc;
-
- if ( (parameters & HVMPME_MODE_MASK) == HVMPME_mode_sync )
- {
- req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
- vm_event_vcpu_pause(v);
- }
-
- hvm_event_fill_regs(req);
- vm_event_put_request(d, &d->vm_event->monitor, req);
-
- return 1;
-}
-
-void hvm_event_cr0(unsigned long value, unsigned long old)
-{
- vm_event_request_t req = {
- .reason = VM_EVENT_REASON_CR0,
- .vcpu_id = current->vcpu_id,
- .cr_event.new_value = value,
- .cr_event.old_value = old
- };
-
- long parameters = current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_CR0];
-
- if ( (parameters & HVMPME_onchangeonly) && (value == old) )
- return;
-
- hvm_event_traps(parameters, &req);
-}
-
-void hvm_event_cr3(unsigned long value, unsigned long old)
-{
- vm_event_request_t req = {
- .reason = VM_EVENT_REASON_CR3,
- .vcpu_id = current->vcpu_id,
- .cr_event.new_value = value,
- .cr_event.old_value = old
- };
-
- long parameters = current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_CR3];
-
- if ( (parameters & HVMPME_onchangeonly) && (value == old) )
- return;
-
- hvm_event_traps(parameters, &req);
-}
-
-void hvm_event_cr4(unsigned long value, unsigned long old)
-{
- vm_event_request_t req = {
- .reason = VM_EVENT_REASON_CR4,
- .vcpu_id = current->vcpu_id,
- .cr_event.new_value = value,
- .cr_event.old_value = old
- };
-
- long parameters = current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_CR4];
-
- if ( (parameters & HVMPME_onchangeonly) && (value == old) )
- return;
-
- hvm_event_traps(parameters, &req);
-}
-
-void hvm_event_msr(unsigned long msr, unsigned long value)
-{
- vm_event_request_t req = {
- .reason = VM_EVENT_REASON_MSR,
- .vcpu_id = current->vcpu_id,
- .msr_event.msr = msr,
- .msr_event.new_value = value,
- };
-
- hvm_event_traps(current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_MSR],
- &req);
-}
-
-int hvm_event_int3(unsigned long gla)
-{
- uint32_t pfec = PFEC_page_present;
- vm_event_request_t req = {
- .reason = VM_EVENT_REASON_INT3,
- .vcpu_id = current->vcpu_id,
- .int3_event.gla = gla,
- .int3_event.gfn = paging_gva_to_gfn(current, gla, &pfec)
- };
-
- return hvm_event_traps(current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_INT3],
- &req);
-}
-
-int hvm_event_single_step(unsigned long gla)
-{
- uint32_t pfec = PFEC_page_present;
- vm_event_request_t req = {
- .reason = VM_EVENT_REASON_SINGLESTEP,
- .vcpu_id = current->vcpu_id,
- .singlestep_event.gla = gla,
- .singlestep_event.gfn = paging_gva_to_gfn(current, gla, &pfec)
- };
-
- return hvm_event_traps(current->domain->arch.hvm_domain
- .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
- &req);
-}
-
int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
{
if (hvm_funcs.nhvm_vcpu_hostrestore)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d2c39f4..42429ab 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -52,6 +52,7 @@
#include <asm/hvm/vpt.h>
#include <public/hvm/save.h>
#include <asm/hvm/trace.h>
+#include <asm/hvm/event.h>
#include <asm/xenoprof.h>
#include <asm/debugger.h>
#include <asm/apic.h>
diff --git a/xen/include/asm-x86/hvm/event.h b/xen/include/asm-x86/hvm/event.h
new file mode 100644
index 0000000..5a498a9
--- /dev/null
+++ b/xen/include/asm-x86/hvm/event.h
@@ -0,0 +1,40 @@
+/*
+ * event.h: Hardware virtual machine assist events.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __ASM_X86_HVM_EVENT_H__
+#define __ASM_X86_HVM_EVENT_H__
+
+/* Called for current VCPU on crX changes by guest */
+void hvm_event_cr0(unsigned long value, unsigned long old);
+void hvm_event_cr3(unsigned long value, unsigned long old);
+void hvm_event_cr4(unsigned long value, unsigned long old);
+void hvm_event_msr(unsigned long msr, unsigned long value);
+/* Called for current VCPU on int3: returns -1 if no listener */
+int hvm_event_int3(unsigned long gla);
+int hvm_event_single_step(unsigned long gla);
+
+#endif /* __ASM_X86_HVM_EVENT_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
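
[Reading aid, not part of the patch: a sketch of how a VM exit handler might consume hvm_event_int3() as declared above. The handler and the reinject_int3() fallback are hypothetical; per hvm_event_traps() in event.c, the helper returns 0 when monitoring is not enabled for the domain, a negative value if claiming a ring slot fails, and a positive value once the request has been put on (or dropped for lack of) the monitor ring.]

/* Hypothetical caller -- not part of this patch. */
#include <xen/sched.h>
#include <asm/hvm/event.h>

extern void reinject_int3(struct cpu_user_regs *regs); /* placeholder */

static void example_int3_vmexit(struct cpu_user_regs *regs)
{
    /* regs->eip is the guest linear address of the INT3 instruction. */
    int rc = hvm_event_int3(regs->eip);

    if ( rc <= 0 )
        /* No monitor consumed the event: hand the #BP back to the guest. */
        reinject_int3(regs);
}
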
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 5ac390b..c77076a 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -473,17 +473,6 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);
-/* Called for current VCPU on crX changes by guest */
-void hvm_event_cr0(unsigned long value, unsigned long old);
-void hvm_event_cr3(unsigned long value, unsigned long old);
-void hvm_event_cr4(unsigned long value, unsigned long old);
-void hvm_event_msr(unsigned long msr, unsigned long value);
-/* Called for current VCPU on int3: returns -1 if no listener */
-int hvm_event_int3(unsigned long gla);
-
-/* Called for current VCPU on single step: returns -1 if no listener */
-int hvm_event_single_step(unsigned long gla);
-
/*
* Nested HVM
*/
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel