[Xen-devel] [PATCH RFC 30/44] x86/pv: Break handle_ldt_mapping_fault() out of handle_gdt_ldt_mapping_fault()
Future changes will alter the conditions under which we expect to take faults.

One adjustment, however, is to exclude the use of this fixup path for non-PV
guests.  Well-formed code shouldn't reference the LDT while in HVM vcpu
context, but currently, on a context switch from PV to HVM context, a stale
LDT selector may still be loaded over an unmapped region.

By explicitly excluding HVM context at this point, we avoid falling into
pv_map_ldt_shadow_page(), which would result in erroneous hypervisor
execution and a cascade failure.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v2:
* Correct the sense of the HVM context check
* Reduce offset to unsigned int.  It will be at most 0xfffc.
---
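[ Editorial note, not part of the patch: the 0xfffc bound is consistent
  with the LDT spanning at most 8192 descriptors of 8 bytes each -- the
  last descriptor starts at byte 0xfff8 and its final 4-byte word sits at
  0xfffc, so the value comfortably fits in an unsigned int.  A standalone
  sanity check of that arithmetic: ]

    #include <assert.h>

    int main(void)
    {
        /* Highest plausible LDT byte offset: the last 4-byte word of
         * the last of 8192 possible 8-byte descriptors. */
        unsigned int last_desc = (8192 - 1) * 8;  /* == 0xfff8 */
        unsigned int max_off   = last_desc + 4;   /* == 0xfffc */

        assert(max_off == 0xfffc);
        return 0;
    }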
xen/arch/x86/traps.c | 79 ++++++++++++++++++++++++++++++----------------------
1 file changed, 46 insertions(+), 33 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index ef9464b..2f1540e 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1094,6 +1094,48 @@ static void reserved_bit_page_fault(unsigned long addr,
     show_execution_state(regs);
 }
 
+static int handle_ldt_mapping_fault(unsigned int offset,
+                                    struct cpu_user_regs *regs)
+{
+    struct vcpu *curr = current;
+
+    /*
+     * Not in PV context? Something is very broken. Leave it to the #PF
+     * handler, which will probably result in a panic().
+     */
+    if ( !is_pv_vcpu(curr) )
+        return 0;
+
+    /* Try to copy a mapping from the guest's LDT, if it is valid. */
+    if ( likely(pv_map_ldt_shadow_page(offset)) )
+    {
+        if ( guest_mode(regs) )
+            trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
+                                regs->rip, offset);
+    }
+    else
+    {
+        /* In hypervisor mode? Leave it to the #PF handler to fix up. */
+        if ( !guest_mode(regs) )
+            return 0;
+
+        /* Access would have become non-canonical? Pass #GP[sel] back. */
+        if ( unlikely(!is_canonical_address(
+                          curr->arch.pv_vcpu.ldt_base + offset)) )
+        {
+            uint16_t ec = (offset & ~(X86_XEC_EXT | X86_XEC_IDT)) | X86_XEC_TI;
+
+            pv_inject_hw_exception(TRAP_gp_fault, ec);
+        }
+        else
+            /* else pass the #PF back, with adjusted %cr2. */
+            pv_inject_page_fault(regs->error_code,
+                                 curr->arch.pv_vcpu.ldt_base + offset);
+    }
+
+    return EXCRET_fault_fixed;
+}
+
 static int handle_gdt_ldt_mapping_fault(unsigned long offset,
                                         struct cpu_user_regs *regs)
 {
@@ -1115,40 +1157,11 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset,
     offset &= (1UL << (GDT_LDT_VCPU_VA_SHIFT-1)) - 1UL;
 
     if ( likely(is_ldt_area) )
-    {
-        /* LDT fault: Copy a mapping from the guest's LDT, if it is valid. */
-        if ( likely(pv_map_ldt_shadow_page(offset)) )
-        {
-            if ( guest_mode(regs) )
-                trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
-                                    regs->rip, offset);
-        }
-        else
-        {
-            /* In hypervisor mode? Leave it to the #PF handler to fix up. */
-            if ( !guest_mode(regs) )
-                return 0;
+        return handle_ldt_mapping_fault(offset, regs);
 
-            /* Access would have become non-canonical? Pass #GP[sel] back. */
-            if ( unlikely(!is_canonical_address(
-                              curr->arch.pv_vcpu.ldt_base + offset)) )
-            {
-                uint16_t ec = (offset & ~(X86_XEC_EXT | X86_XEC_IDT)) | X86_XEC_TI;
-
-                pv_inject_hw_exception(TRAP_gp_fault, ec);
-            }
-            else
-                /* else pass the #PF back, with adjusted %cr2. */
-                pv_inject_page_fault(regs->error_code,
-                                     curr->arch.pv_vcpu.ldt_base + offset);
-        }
-    }
-    else
-    {
-        /* GDT fault: handle the fault as #GP(selector). */
-        regs->error_code = offset & ~(X86_XEC_EXT | X86_XEC_IDT | X86_XEC_TI);
-        (void)do_general_protection(regs);
-    }
+    /* GDT fault: handle the fault as #GP(selector). */
+    regs->error_code = offset & ~(X86_XEC_EXT | X86_XEC_IDT | X86_XEC_TI);
+    (void)do_general_protection(regs);
 
     return EXCRET_fault_fixed;
 }
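[ Editorial note, not part of the patch: in the non-canonical case above,
  the #GP error code is formed by clearing the EXT (bit 0) and IDT (bit 1)
  exception-code bits in the faulting LDT byte offset and setting TI
  (bit 2), yielding a selector-style error code that names the offending
  LDT entry.  A self-contained illustration, with the values of Xen's
  X86_XEC_* constants inlined: ]

    #include <assert.h>
    #include <stdint.h>

    #define X86_XEC_EXT (1u << 0)  /* fault caused by an external event */
    #define X86_XEC_IDT (1u << 1)  /* selector references the IDT */
    #define X86_XEC_TI  (1u << 2)  /* selector references the LDT */

    int main(void)
    {
        unsigned int offset = 0x0008;  /* byte offset of LDT entry 1 */
        uint16_t ec = (offset & ~(X86_XEC_EXT | X86_XEC_IDT)) | X86_XEC_TI;

        /* 0x000c == selector index 1 with the TI (LDT) bit set. */
        assert(ec == 0x000c);
        return 0;
    }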
--
2.1.4