|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 17/23] PVH xen: Checks, asserts, and limitations for PVH
This patch adds some precautionary checks and debug asserts for PVH. Also,
PVH doesn't support any HVM type guest monitoring at present.
Changes in V9:
- Remove ASSERTs from emulate_gate_op and do_device_not_available.
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 13 +++++++++++++
xen/arch/x86/hvm/mtrr.c | 4 ++++
xen/arch/x86/physdev.c | 13 +++++++++++++
3 files changed, 30 insertions(+), 0 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bac4708..3b47e6f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4526,8 +4526,11 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
return 1;
}
+/* PVH fixme: add support for monitoring guest behaviour in below functions. */
void hvm_memory_event_cr0(unsigned long value, unsigned long old)
{
+ if ( is_pvh_vcpu(current) )
+ return;
hvm_memory_event_traps(current->domain->arch.hvm_domain
.params[HVM_PARAM_MEMORY_EVENT_CR0],
MEM_EVENT_REASON_CR0,
@@ -4536,6 +4539,8 @@ void hvm_memory_event_cr0(unsigned long value, unsigned long old)
void hvm_memory_event_cr3(unsigned long value, unsigned long old)
{
+ if ( is_pvh_vcpu(current) )
+ return;
hvm_memory_event_traps(current->domain->arch.hvm_domain
.params[HVM_PARAM_MEMORY_EVENT_CR3],
MEM_EVENT_REASON_CR3,
@@ -4544,6 +4549,8 @@ void hvm_memory_event_cr3(unsigned long value, unsigned long old)
void hvm_memory_event_cr4(unsigned long value, unsigned long old)
{
+ if ( is_pvh_vcpu(current) )
+ return;
hvm_memory_event_traps(current->domain->arch.hvm_domain
.params[HVM_PARAM_MEMORY_EVENT_CR4],
MEM_EVENT_REASON_CR4,
@@ -4552,6 +4559,8 @@ void hvm_memory_event_cr4(unsigned long value, unsigned long old)
void hvm_memory_event_msr(unsigned long msr, unsigned long value)
{
+ if ( is_pvh_vcpu(current) )
+ return;
hvm_memory_event_traps(current->domain->arch.hvm_domain
.params[HVM_PARAM_MEMORY_EVENT_MSR],
MEM_EVENT_REASON_MSR,
@@ -4564,6 +4573,8 @@ int hvm_memory_event_int3(unsigned long gla)
unsigned long gfn;
gfn = paging_gva_to_gfn(current, gla, &pfec);
+ if ( is_pvh_vcpu(current) )
+ return 0;
return hvm_memory_event_traps(current->domain->arch.hvm_domain
.params[HVM_PARAM_MEMORY_EVENT_INT3],
MEM_EVENT_REASON_INT3,
@@ -4576,6 +4587,8 @@ int hvm_memory_event_single_step(unsigned long gla)
unsigned long gfn;
gfn = paging_gva_to_gfn(current, gla, &pfec);
+ if ( is_pvh_vcpu(current) )
+ return 0;
return hvm_memory_event_traps(current->domain->arch.hvm_domain
.params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
MEM_EVENT_REASON_SINGLESTEP,
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index b9d6411..6706af6 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -578,6 +578,10 @@ int32_t hvm_set_mem_pinned_cacheattr(
{
struct hvm_mem_pinned_cacheattr_range *range;
+ /* Side note: A PVH guest writes to MSR_IA32_CR_PAT natively. */
+ if ( is_pvh_domain(d) )
+ return -EOPNOTSUPP;
+
if ( !((type == PAT_TYPE_UNCACHABLE) ||
(type == PAT_TYPE_WRCOMB) ||
(type == PAT_TYPE_WRTHROUGH) ||
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 3733c7a..73c8d2a 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -475,6 +475,13 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case PHYSDEVOP_set_iopl: {
struct physdev_set_iopl set_iopl;
+
+ if ( is_pvh_vcpu(current) )
+ {
+ ret = -EPERM;
+ break;
+ }
+
ret = -EFAULT;
if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
break;
@@ -488,6 +495,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case PHYSDEVOP_set_iobitmap: {
struct physdev_set_iobitmap set_iobitmap;
+
+ if ( is_pvh_vcpu(current) )
+ {
+ ret = -EPERM;
+ break;
+ }
ret = -EFAULT;
if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
break;
--
1.7.2.3
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |