x86: segment attribute handling adjustments

Null selector loads into SS (possible in 64-bit mode only, and only in
rings other than ring 3) must not alter SS.DPL. (This was found to be an
issue on KVM, and fixed in Linux commit 33ab91103b.)

Further, arch_set_info_hvm_guest() didn't make sure that the ASSERT()s
in hvm_set_segment_register() wouldn't trigger: add further checks, but
tolerate (adjust) clear accessed (CS, SS, DS, ES) and busy (TR) bits.

Finally, the setting of the accessed bits for user segments was lost by
commit dd5c85e312 ("x86/hvm: Reposition the modification of raw segment
data from the VMCB/VMCS"), yet VMX requires them to be set for usable
segments. Add respective ASSERT()s (the only path not properly setting
them was arch_set_info_hvm_guest()).

Signed-off-by: Jan Beulich
---
v2: Simplify TSS type check in arch_set_info_hvm_guest().
---
TBD: Said Linux commit also fiddles with G, D/B, P, S, and TYPE, yet
that does not appear to be generally needed; on my Westmere box SS
attributes of 0x1c000 (which is what the hardware loads for a null
selector load), 0x10000, or 0x14000 do not result in VM entry failures.

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1294,16 +1294,24 @@ static inline int check_segment(struct s
         return 0;
     }
 
-    if ( seg != x86_seg_tr && !reg->attr.fields.s )
+    if ( seg == x86_seg_tr )
     {
-        gprintk(XENLOG_ERR,
-                "System segment provided for a code or data segment\n");
-        return -EINVAL;
-    }
+        if ( reg->attr.fields.s )
+        {
+            gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
+            return -EINVAL;
+        }
 
-    if ( seg == x86_seg_tr && reg->attr.fields.s )
+        if ( reg->attr.fields.type != SYS_DESC_tss_busy )
+        {
+            gprintk(XENLOG_ERR, "Non-32-bit-TSS segment provided for TR\n");
+            return -EINVAL;
+        }
+    }
+    else if ( !reg->attr.fields.s )
     {
-        gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
+        gprintk(XENLOG_ERR,
+                "System segment provided for a code or data segment\n");
         return -EINVAL;
     }
 
@@ -1366,7 +1374,8 @@ int arch_set_info_hvm_guest(struct vcpu
 #define SEG(s, r) ({                                                        \
     s = (struct segment_register){ .base = (r)->s ## _base,                 \
                                    .limit = (r)->s ## _limit,               \
-                                   .attr.bytes = (r)->s ## _ar };           \
+                                   .attr.bytes = (r)->s ## _ar |            \
+                                       (x86_seg_##s != x86_seg_tr ? 1 : 2) }; \
     check_segment(&s, x86_seg_ ## s); })
 
     rc = SEG(cs, regs);
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5731,6 +5731,7 @@ void hvm_set_segment_register(struct vcp
     case x86_seg_cs:
         ASSERT(reg->attr.fields.p);                  /* Usable.           */
         ASSERT(reg->attr.fields.s);                  /* User segment.     */
+        ASSERT(reg->attr.fields.type & 0x1);         /* Accessed.         */
         ASSERT((reg->base >> 32) == 0);              /* Upper bits clear. */
         break;
 
@@ -5740,6 +5741,7 @@ void hvm_set_segment_register(struct vcp
             ASSERT(reg->attr.fields.s);              /* User segment.     */
             ASSERT(!(reg->attr.fields.type & 0x8));  /* Data segment.     */
             ASSERT(reg->attr.fields.type & 0x2);     /* Writeable.        */
+            ASSERT(reg->attr.fields.type & 0x1);     /* Accessed.         */
             ASSERT((reg->base >> 32) == 0);          /* Upper bits clear. */
         }
         break;
@@ -5755,6 +5757,8 @@ void hvm_set_segment_register(struct vcp
             if ( reg->attr.fields.type & 0x8 )
                 ASSERT(reg->attr.fields.type & 0x2); /* Readable.         */
 
+            ASSERT(reg->attr.fields.type & 0x1);     /* Accessed.         */
+
             if ( seg == x86_seg_fs || seg == x86_seg_gs )
                 ASSERT(is_canonical_address(reg->base));
             else
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1471,6 +1471,11 @@ protmode_load_seg(
     else
         sreg->attr.bytes = 0;
     sreg->sel = sel;
+
+    /* Since CPL == SS.DPL, we need to put back DPL. */
+    if ( seg == x86_seg_ss )
+        sreg->attr.fields.dpl = sel;
+
     return X86EMUL_OKAY;
 }
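
Not part of the patch, but for reference: a minimal sketch of the type bits
the SEG() adjustment and the new ASSERT()s are concerned with, assuming the
usual descriptor type encoding (bit 0 of the type field is the accessed bit
for user segments, while bit 1 turns an available TSS, type 9, into a busy
one, type 11). The macro and helper names below are made up for illustration
only.

#include <stdint.h>

/* Low two bits of the 4-bit type field in the packed segment attributes. */
#define SEG_TYPE_ACCESSED  0x1   /* user segments: accessed            */
#define SEG_TYPE_TSS_BUSY  0x2   /* TR: available (9) -> busy (11) TSS */

/* Hypothetical helper mirroring the SEG() adjustment above: OR in the
 * accessed bit for CS/SS/DS/ES/FS/GS, or the busy bit for TR, so a
 * toolstack-supplied value with the respective bit clear gets tolerated
 * rather than tripping the ASSERT()s in hvm_set_segment_register(). */
static inline uint16_t adjust_attr(uint16_t ar, int is_tr)
{
    return ar | (is_tr ? SEG_TYPE_TSS_BUSY : SEG_TYPE_ACCESSED);
}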
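
Likewise for illustration only (a hypothetical guest-side snippet, not part
of the patch): the operation the x86_emulate.c hunk deals with. A null
selector load into SS is possible only in 64-bit mode and only outside ring
3, and per the description above it must not alter SS.DPL; the snippet only
matters when Xen ends up emulating the instruction.

/* 64-bit ring-0 guest code: load a null selector into %ss.  The selector's
 * RPL equals CPL (0) here; SS.DPL must remain equal to CPL afterwards. */
static inline void load_null_ss(void)
{
    unsigned short sel = 0;   /* null selector */

    asm volatile ( "mov %0, %%ss" :: "r" (sel) : "memory" );
}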