
Re: [Xen-devel] [PATCH for 4.9 3/6] x86/hvm: Fix segmentation logic for system segments



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 31 March 2017 20:51
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich
> <JBeulich@xxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Tim
> (Xen.org) <tim@xxxxxxx>; Julien Grall <julien.grall@xxxxxxx>
> Subject: [PATCH for 4.9 3/6] x86/hvm: Fix segmentation logic for system
> segments
> 
> c/s c785f759718 "x86/emul: Prepare to allow use of system segments for
> memory references" made alterations to hvm_virtual_to_linear_addr() to
> allow for the use of system segments.
> 
> However, the determination of which segmentation mode to use was based
> on the current address size from emulation.  In particular, it is wrong
> for system segment accesses while executing in a compatibility mode code
> segment.
> 
> Replace the addr_size parameter with a new segmentation mode enumeration
> (to prevent this mistake from being made again), and introduce a new
> helper to calculate the correct segmentation mode from current register
> state.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
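
For readers following along: the failure mode is easiest to see with a
compatibility-mode code segment (long mode active, cs.l clear), where the
emulator's addr_size is 32 (or 16), in any case not 64.  A minimal sketch
of the old vs. new mode selection, using names from the patch (the
scenario itself is illustrative):

    /*
     * Old: addr_size != 64, so a descriptor-table reference such as
     * x86_seg_gdtr took the protected-mode path with 32-bit limit
     * checks, even though long mode is active and such accesses use a
     * 64-bit base with canonical checks.
     *
     * New: is_x86_system_segment(seg) forces the long-mode path
     * whenever long mode is active, regardless of cs.l.
     */
    seg_mode = hvm_seg_mode(current, x86_seg_gdtr, cs);
    /* -> hvm_seg_mode_long, where the old code picked the 32-bit path. */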

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Tim Deegan <tim@xxxxxxx>
> CC: Julien Grall <julien.grall@xxxxxxx>
> 
> This is the same logical bug that caused XSA-196, but luckily it hasn't
> yet made it into a released version of Xen.
> ---
>  xen/arch/x86/hvm/emulate.c      | 17 ++++++++++------
>  xen/arch/x86/hvm/hvm.c          | 45 +++++++++++++++++++++++++++++------------
>  xen/arch/x86/mm/shadow/common.c |  5 ++++-
>  xen/include/asm-x86/hvm/hvm.h   | 12 ++++++++++-
>  4 files changed, 58 insertions(+), 21 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 3d084ca..f578796 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -508,6 +508,7 @@ static int hvmemul_virtual_to_linear(
>      struct hvm_emulate_ctxt *hvmemul_ctxt,
>      unsigned long *linear)
>  {
> +    enum hvm_segmentation_mode seg_mode;
>      struct segment_register *reg;
>      int okay;
>      unsigned long max_reps = 4096;
> @@ -518,6 +519,9 @@ static int hvmemul_virtual_to_linear(
>          return X86EMUL_OKAY;
>      }
> 
> +    seg_mode = hvm_seg_mode(
> +        current, seg, hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt));
> +
>      /*
>       * If introspection has been enabled for this domain, and we're emulating
>       * because a vm_reply asked us to (i.e. not doing regular IO) reps should
> @@ -548,8 +552,7 @@ static int hvmemul_virtual_to_linear(
>          ASSERT(offset >= ((*reps - 1) * bytes_per_rep));
>          okay = hvm_virtual_to_linear_addr(
>              seg, reg, offset - (*reps - 1) * bytes_per_rep,
> -            *reps * bytes_per_rep, access_type,
> -            hvmemul_ctxt->ctxt.addr_size, linear);
> +            *reps * bytes_per_rep, access_type, seg_mode, linear);
>          *linear += (*reps - 1) * bytes_per_rep;
>          if ( hvmemul_ctxt->ctxt.addr_size != 64 )
>              *linear = (uint32_t)*linear;
> @@ -557,8 +560,8 @@ static int hvmemul_virtual_to_linear(
>      else
>      {
>          okay = hvm_virtual_to_linear_addr(
> -            seg, reg, offset, *reps * bytes_per_rep, access_type,
> -            hvmemul_ctxt->ctxt.addr_size, linear);
> +            seg, reg, offset, *reps * bytes_per_rep,
> +            access_type, seg_mode, linear);
>      }
> 
>      if ( okay )
> @@ -2068,14 +2071,16 @@ void hvm_emulate_init_per_insn(
>      hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->rip;
>      if ( !insn_bytes )
>      {
> +        enum hvm_segmentation_mode seg_mode =
> +            hvm_seg_mode(curr, x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs]);
> +
>          hvmemul_ctxt->insn_buf_bytes =
>              hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
>              (hvm_virtual_to_linear_addr(x86_seg_cs,
>                                          &hvmemul_ctxt->seg_reg[x86_seg_cs],
>                                          hvmemul_ctxt->insn_buf_eip,
>                                          sizeof(hvmemul_ctxt->insn_buf),
> -                                        hvm_access_insn_fetch,
> -                                        hvmemul_ctxt->ctxt.addr_size,
> +                                        hvm_access_insn_fetch, seg_mode,
>                                          &addr) &&
>               hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
>                                           sizeof(hvmemul_ctxt->insn_buf),
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 9f83cd8..f250afb 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2374,13 +2374,27 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
>      return X86EMUL_OKAY;
>  }
> 
> +enum hvm_segmentation_mode hvm_seg_mode(
> +    const struct vcpu *v, enum x86_segment seg,
> +    const struct segment_register *cs)
> +{
> +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
> +        return hvm_seg_mode_real;
> +
> +    if ( hvm_long_mode_active(v) &&
> +         (is_x86_system_segment(seg) || cs->attr.fields.l) )
> +        return hvm_seg_mode_long;
> +
> +    return hvm_seg_mode_prot;
> +}
> +
>  bool_t hvm_virtual_to_linear_addr(
>      enum x86_segment seg,
>      const struct segment_register *reg,
>      unsigned long offset,
>      unsigned int bytes,
>      enum hvm_access_type access_type,
> -    unsigned int addr_size,
> +    enum hvm_segmentation_mode seg_mode,
>      unsigned long *linear_addr)
>  {
>      unsigned long addr = offset, last_byte;
> @@ -2394,8 +2408,9 @@ bool_t hvm_virtual_to_linear_addr(
>       */
>      ASSERT(seg < x86_seg_none);
> 
> -    if ( !(current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
> +    switch ( seg_mode )
>      {
> +    case hvm_seg_mode_real:
>          /*
>           * REAL MODE: Don't bother with segment access checks.
>           * Certain of them are not done in native real mode anyway.
> @@ -2404,11 +2419,11 @@ bool_t hvm_virtual_to_linear_addr(
>          last_byte = (uint32_t)addr + bytes - !!bytes;
>          if ( last_byte < addr )
>              goto out;
> -    }
> -    else if ( addr_size != 64 )
> -    {
> +        break;
> +
> +    case hvm_seg_mode_prot:
>          /*
> -         * COMPATIBILITY MODE: Apply segment checks and add base.
> +         * PROTECTED/COMPATIBILITY MODE: Apply segment checks and add base.
>           */
> 
>          /*
> @@ -2454,9 +2469,9 @@ bool_t hvm_virtual_to_linear_addr(
>          }
>          else if ( (last_byte > reg->limit) || (last_byte < offset) )
>              goto out; /* last byte is beyond limit or wraps 0xFFFFFFFF */
> -    }
> -    else
> -    {
> +        break;
> +
> +    case hvm_seg_mode_long:
>          /*
>           * User segments are always treated as present.  System segments may
>           * not be, and also incur limit checks.
> @@ -2476,6 +2491,11 @@ bool_t hvm_virtual_to_linear_addr(
>          if ( !is_canonical_address(addr) || last_byte < addr ||
>               !is_canonical_address(last_byte) )
>              goto out;
> +        break;
> +
> +    default:
> +        ASSERT_UNREACHABLE();
> +        goto out;
>      }
> 
>      /* All checks ok. */
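
The hvm_seg_mode_long case relies on canonical-address checks rather than
limit checks for user segments.  As a reminder, a canonical address is one
whose top bits are a sign-extension of bit 47 on 48-bit implementations; a
sketch of that rule, not necessarily Xen's exact is_canonical_address()
implementation:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative canonical check for 48-bit linear addresses:
     * bits 63:48 must equal bit 47. */
    static inline bool canonical_48(uint64_t addr)
    {
        return (int64_t)(addr << 16) >> 16 == (int64_t)addr;
    }
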
> @@ -3024,7 +3044,7 @@ void hvm_task_switch(
>              sp = regs->sp -= opsz;
>          if ( hvm_virtual_to_linear_addr(x86_seg_ss, &segr, sp, opsz,
>                                          hvm_access_write,
> -                                        16 << segr.attr.fields.db,
> +                                        hvm_seg_mode_prot,
>                                          &linear_addr) )
>          {
>              rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
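
Hard-coding hvm_seg_mode_prot for the task switch path looks right:
hardware task switches are not available in long mode and require
protected mode to begin with, so the removed expression could only ever
have selected the non-64-bit path anyway.  Schematically:

    /* Before: 16 << segr.attr.fields.db yielded 16 or 32, i.e. the old
     *         "addr_size != 64" path.
     * After:  hvm_seg_mode_prot names that same path explicitly. */
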
> @@ -3600,14 +3620,13 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
>          const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
>          uint32_t walk = (ctxt.seg_reg[x86_seg_ss].attr.fields.dpl == 3)
>              ? PFEC_user_mode : 0;
> +        enum hvm_segmentation_mode seg_mode = hvm_seg_mode(cur, x86_seg_cs, cs);
>          unsigned long addr;
>          char sig[5]; /* ud2; .ascii "xen" */
> 
>          if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
>                                          sizeof(sig), hvm_access_insn_fetch,
> -                                        (hvm_long_mode_active(cur) &&
> -                                         cs->attr.fields.l) ? 64 :
> -                                        cs->attr.fields.db ? 32 : 16, &addr) &&
> +                                        seg_mode, &addr) &&
>               (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
>                                            walk, NULL) == HVMCOPY_okay) &&
>               (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
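
Similarly here: the callee only ever distinguished 64-bit from everything
else, so replacing the open-coded size computation with the helper is
behaviour-preserving for a user code segment.  Schematically (not code
from the patch):

    /* old caller: (long_mode && cs.l) ? 64 : db ? 32 : 16
     * old callee: addr_size != 64  ->  protected path
     * new:        hvm_seg_mode(cur, x86_seg_cs, cs) yields
     *             hvm_seg_mode_long exactly when (long_mode && cs.l). */
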
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 14a07dd..551a7d3 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -144,6 +144,7 @@ static int hvm_translate_virtual_addr(
>      struct sh_emulate_ctxt *sh_ctxt,
>      unsigned long *linear)
>  {
> +    enum hvm_segmentation_mode seg_mode;
>      const struct segment_register *reg;
>      int okay;
> 
> @@ -151,8 +152,10 @@ static int hvm_translate_virtual_addr(
>      if ( IS_ERR(reg) )
>          return -PTR_ERR(reg);
> 
> +    seg_mode = hvm_seg_mode(current, seg, hvm_get_seg_reg(x86_seg_cs, sh_ctxt));
> +
>      okay = hvm_virtual_to_linear_addr(
> -        seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, linear);
> +        seg, reg, offset, bytes, access_type, seg_mode, linear);
> 
>      if ( !okay )
>      {
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 49c8001..ed6994c 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -460,6 +460,16 @@ void hvm_task_switch(
>      uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
>      int32_t errcode);
> 
> +enum hvm_segmentation_mode {
> +    hvm_seg_mode_real,
> +    hvm_seg_mode_prot,
> +    hvm_seg_mode_long,
> +};
> +
> +enum hvm_segmentation_mode hvm_seg_mode(
> +    const struct vcpu *v, enum x86_segment seg,
> +    const struct segment_register *cs);
> +
>  enum hvm_access_type {
>      hvm_access_insn_fetch,
>      hvm_access_none,
> @@ -472,7 +482,7 @@ bool_t hvm_virtual_to_linear_addr(
>      unsigned long offset,
>      unsigned int bytes,
>      enum hvm_access_type access_type,
> -    unsigned int addr_size,
> +    enum hvm_segmentation_mode seg_mode,
>      unsigned long *linear_addr);
> 
>  void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
> --
> 2.1.4

