[Xen-devel] [PATCH v3 1/3] x86/vmx: introduce vmx_find_msr()
Modify vmx_add_msr() to use a variation of the insertion sort algorithm:
find the place for the new entry and shift all subsequent elements up
before inserting it.
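
As an illustration (not part of the patch), here is a minimal
self-contained sketch of the same shift-and-insert scheme; the
struct entry type and the sorted_insert() helper are hypothetical
names invented for this example:

    #include <string.h>

    struct entry {
        unsigned int index;        /* sort key, like vmx_msr_entry.index */
        unsigned long long data;
    };

    /* Insert @key into the sorted array @arr of @count elements.
     * The caller guarantees room for one more element.  Returns the
     * new count, unchanged if @key is already present. */
    static unsigned int sorted_insert(struct entry *arr, unsigned int count,
                                      unsigned int key)
    {
        unsigned int i;

        /* Find the insertion point, bailing out on an exact match. */
        for ( i = 0; i < count && arr[i].index <= key; i++ )
            if ( arr[i].index == key )
                return count;

        /* Shift all subsequent elements up by one slot. */
        memmove(&arr[i + 1], &arr[i], sizeof(*arr) * (count - i));

        arr[i].index = key;
        arr[i].data = 0;

        return count + 1;
    }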
The new vmx_find_msr() exploits the fact that the MSR list is now sorted
and reuses the existing bsearch() code for the binary search.
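
The lookup side of the same scheme, sketched here with the standard C
bsearch(3); the patch itself uses Xen's bsearch() with the same calling
convention, as visible in the diff below.  struct entry is the
hypothetical type from the sketch above:

    #include <stdlib.h>

    /* Compare the key against one array element, as bsearch() expects. */
    static int entry_key_cmp(const void *key, const void *elt)
    {
        const unsigned int *k = key;
        const struct entry *e = elt;

        if ( *k > e->index )
            return 1;
        if ( *k < e->index )
            return -1;

        return 0;
    }

    static struct entry *find_entry(struct entry *arr, unsigned int count,
                                    unsigned int key)
    {
        if ( arr == NULL )
            return NULL;

        return bsearch(&key, arr, count, sizeof(*arr), entry_key_cmp);
    }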
Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2 --> v3:
- "idx < *msr_count" is now evaluated before "(*msr_area)[idx].index"
  in order to eliminate a possible out-of-bounds access
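
The ordering matters because && evaluates left to right and
short-circuits: with the bound checked first, the element is never read
once idx reaches *msr_count.  A minimal illustration, using a
hypothetical insertion_point() helper:

    /* Returns the insertion point for @key in the sorted @vals[0..count). */
    static unsigned int insertion_point(const unsigned int *vals,
                                        unsigned int count, unsigned int key)
    {
        unsigned int i;

        /* "i < count" must come first: when i reaches count, the
         * short-circuit keeps vals[i] from being read out of bounds. */
        for ( i = 0; i < count && vals[i] <= key; i++ )
            ;

        return i;
    }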
xen/arch/x86/hvm/vmx/vmcs.c | 45 ++++++++++++++++++++++++++++++++++++--
xen/include/asm-x86/hvm/vmx/vmcs.h | 1 +
2 files changed, 44 insertions(+), 2 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 5306a48..977106f 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1320,6 +1320,44 @@ static int construct_vmcs(struct vcpu *v)
     return 0;
 }
 
+static int vmx_msr_entry_key_cmp(const void *key, const void *elt)
+{
+    const u32 *msr = key;
+    const struct vmx_msr_entry *entry = elt;
+
+    if ( *msr > entry->index )
+        return 1;
+    if ( *msr < entry->index )
+        return -1;
+
+    return 0;
+}
+
+struct vmx_msr_entry *vmx_find_msr(u32 msr, int type)
+{
+    struct vcpu *curr = current;
+    unsigned int msr_count;
+    struct vmx_msr_entry *msr_area;
+
+    if ( type == VMX_GUEST_MSR )
+    {
+        msr_count = curr->arch.hvm_vmx.msr_count;
+        msr_area = curr->arch.hvm_vmx.msr_area;
+    }
+    else
+    {
+        ASSERT(type == VMX_HOST_MSR);
+        msr_count = curr->arch.hvm_vmx.host_msr_count;
+        msr_area = curr->arch.hvm_vmx.host_msr_area;
+    }
+
+    if ( msr_area == NULL )
+        return NULL;
+
+    return bsearch(&msr, msr_area, msr_count, sizeof(struct vmx_msr_entry),
+                   vmx_msr_entry_key_cmp);
+}
+
 int vmx_read_guest_msr(u32 msr, u64 *val)
 {
     struct vcpu *curr = current;
@@ -1388,14 +1426,17 @@ int vmx_add_msr(u32 msr, int type)
             __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
     }
 
-    for ( idx = 0; idx < *msr_count; idx++ )
+    for ( idx = 0; idx < *msr_count && (*msr_area)[idx].index <= msr; idx++ )
         if ( (*msr_area)[idx].index == msr )
             return 0;
 
     if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
         return -ENOSPC;
 
-    msr_area_elem = *msr_area + *msr_count;
+    memmove(*msr_area + idx + 1, *msr_area + idx,
+            sizeof(*msr_area_elem) * (*msr_count - idx));
+
+    msr_area_elem = *msr_area + idx;
 
     msr_area_elem->index = msr;
     msr_area_elem->mbz = 0;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 4ee01da..4945b78 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -530,6 +530,7 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
 void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
 int vmx_read_guest_msr(u32 msr, u64 *val);
 int vmx_write_guest_msr(u32 msr, u64 val);
+struct vmx_msr_entry *vmx_find_msr(u32 msr, int type);
 int vmx_add_msr(u32 msr, int type);
 void vmx_vmcs_switch(paddr_t from, paddr_t to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
--
2.9.3