
Re: [Xen-devel] Ping: [PATCH v2] x86: assorted array_index_nospec() insertions



> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 16 August 2018 09:03
> To: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>
> Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>; xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>;
> Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
> Subject: Ping: [PATCH v2] x86: assorted array_index_nospec() insertions
> 
> >>> On 26.07.18 at 15:07,  wrote:
> > Don't chance having Spectre v1 (including BCBS, bounds check bypass
> > store) gadgets. In some of the cases the insertions are more of a
> > precautionary nature than fixes for a provably existing gadget, but
> > I think we should err on the safe (secure) side here.
> >
> > Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
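
For readers following along, since every hunk below leans on it:
array_index_nospec(index, size) bounds "index" against "size" without a
conditional branch that the CPU could mispredict. A minimal sketch of
the Linux-style generic fallback, assuming 0 < size <= LONG_MAX and an
arithmetic right shift on signed longs (Xen's real definition lives in
xen/include/xen/nospec.h, may use arch-specific code, and avoids the
double evaluation this simplified macro has):

    #include <limits.h>
    #define BITS_PER_LONG (CHAR_BIT * sizeof(long))

    /* Evaluates to all-ones when index < size, all-zeroes otherwise. */
    static inline unsigned long array_index_mask_nospec(unsigned long index,
                                                        unsigned long size)
    {
        /*
         * index < size: neither operand of the OR has the sign bit set,
         * so the complement is negative and the shift yields ~0UL.
         * index >= size: (size - 1 - index) wraps to a value with the
         * sign bit set, the complement is non-negative, result is 0.
         */
        return ~(long)(index | (size - 1 - index)) >> (BITS_PER_LONG - 1);
    }

    /* Simplified: clamp out-of-range indices to 0 via a data dependency. */
    #define array_index_nospec(index, size) \
        ((typeof(index))((index) & array_index_mask_nospec((index), (size))))

The key property is that an out-of-range index is forced to 0 through
pure data flow rather than a branch, so even a speculatively executed
table access stays within the array.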

> > ---
> > v2: Re-base. Drop guest_cpuid() changes. Fix off-by-1 in
> >     {do,compat}_dm_op().
> >
> > --- a/xen/arch/x86/domctl.c
> > +++ b/xen/arch/x86/domctl.c
> > @@ -28,6 +28,7 @@
> >  #include <xen/hypercall.h> /* for arch_do_domctl */
> >  #include <xsm/xsm.h>
> >  #include <xen/iommu.h>
> > +#include <xen/nospec.h>
> >  #include <xen/vm_event.h>
> >  #include <public/vm_event.h>
> >  #include <asm/mem_sharing.h>
> > @@ -93,27 +94,34 @@ static int update_domain_cpuid_info(stru
> >      /* Insert ctl data into cpuid_policy. */
> >      switch ( ctl->input[0] )
> >      {
> > +        unsigned int idx;
> > +
> >      case 0x00000000 ... ARRAY_SIZE(p->basic.raw) - 1:
> >          switch ( ctl->input[0] )
> >          {
> >          case 4:
> > -            p->cache.raw[ctl->input[1]] = leaf;
> > +            idx = array_index_nospec(ctl->input[1], ARRAY_SIZE(p->cache.raw));
> > +            p->cache.raw[idx] = leaf;
> >              break;
> >
> >          case 7:
> > -            p->feat.raw[ctl->input[1]] = leaf;
> > +            idx = array_index_nospec(ctl->input[1], ARRAY_SIZE(p->feat.raw));
> > +            p->feat.raw[idx] = leaf;
> >              break;
> >
> >          case 0xb:
> > -            p->topo.raw[ctl->input[1]] = leaf;
> > +            idx = array_index_nospec(ctl->input[1], ARRAY_SIZE(p->topo.raw));
> > +            p->topo.raw[idx] = leaf;
> >              break;
> >
> >          case XSTATE_CPUID:
> > -            p->xstate.raw[ctl->input[1]] = leaf;
> > +            idx = array_index_nospec(ctl->input[1], ARRAY_SIZE(p->xstate.raw));
> > +            p->xstate.raw[idx] = leaf;
> >              break;
> >
> >          default:
> > -            p->basic.raw[ctl->input[0]] = leaf;
> > +            idx = array_index_nospec(ctl->input[0], ARRAY_SIZE(p->basic.raw));
> > +            p->basic.raw[idx] = leaf;
> >              break;
> >          }
> >          break;
> > @@ -127,7 +135,9 @@ static int update_domain_cpuid_info(stru
> >          break;
> >
> >      case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
> > -        p->extd.raw[ctl->input[0] - 0x80000000] = leaf;
> > +        idx = array_index_nospec(ctl->input[0] & 0xffff,
> > +                                 ARRAY_SIZE(p->extd.raw));
> > +        p->extd.raw[idx] = leaf;
> >          break;
> >      }
> >
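
One remark on the extd leaf handling just above: the clamp uses
(ctl->input[0] & 0xffff) rather than (ctl->input[0] - 0x80000000). On
the architectural path the two are interchangeable, because the case
range guarantees 0x80000000 <= input[0] < 0x80000000 + ARRAY_SIZE(p->extd.raw),
and as long as ARRAY_SIZE(p->extd.raw) does not exceed 0x10000 the low
16 bits of input[0] already equal the offset from 0x80000000. Worked
through for a hypothetical leaf:

    /* input[0] == 0x80000007 and the case range holds, so:           */
    /*   0x80000007 - 0x80000000 == 7                                 */
    /*   0x80000007 & 0xffff     == 7   -- same index, no subtraction */
    idx = array_index_nospec(0x80000007 & 0xffff, ARRAY_SIZE(p->extd.raw));

Under misspeculation, where the case range cannot be relied upon, the
subsequent array_index_nospec() clamp bounds whatever survives the
masking.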
> > --- a/xen/arch/x86/hvm/dm.c
> > +++ b/xen/arch/x86/hvm/dm.c
> > @@ -17,6 +17,7 @@
> >  #include <xen/event.h>
> >  #include <xen/guest_access.h>
> >  #include <xen/hypercall.h>
> > +#include <xen/nospec.h>
> >  #include <xen/sched.h>
> >
> >  #include <asm/hap.h>
> > @@ -232,7 +233,7 @@ static int set_mem_type(struct domain *d
> >                          struct xen_dm_op_set_mem_type *data)
> >  {
> >      xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
> > -    unsigned int iter = 0;
> > +    unsigned int iter = 0, mem_type;
> >      int rc = 0;
> >
> >      /* Interface types to internal p2m types */
> > @@ -252,7 +253,9 @@ static int set_mem_type(struct domain *d
> >           unlikely(data->mem_type == HVMMEM_unused) )
> >          return -EINVAL;
> >
> > -    if ( data->mem_type  == HVMMEM_ioreq_server )
> > +    mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
> > +
> > +    if ( mem_type == HVMMEM_ioreq_server )
> >      {
> >          unsigned int flags;
> >
> > @@ -279,10 +282,10 @@ static int set_mem_type(struct domain *d
> >
> >          if ( p2m_is_shared(t) )
> >              rc = -EAGAIN;
> > -        else if ( !allow_p2m_type_change(t, memtype[data->mem_type]) )
> > +        else if ( !allow_p2m_type_change(t, memtype[mem_type]) )
> >              rc = -EINVAL;
> >          else
> > -            rc = p2m_change_type_one(d, pfn, t, memtype[data->mem_type]);
> > +            rc = p2m_change_type_one(d, pfn, t, memtype[mem_type]);
> >
> >          put_gfn(d, pfn);
> >
> > @@ -387,6 +390,8 @@ static int dm_op(const struct dmop_args
> >          goto out;
> >      }
> >
> > +    op.op = array_index_nospec(op.op, ARRAY_SIZE(op_size));
> > +
> >      if ( op_args->buf[0].size < offset + op_size[op.op] )
> >          goto out;
> >
> > @@ -739,7 +744,7 @@ int compat_dm_op(domid_t domid,
> >          return -E2BIG;
> >
> >      args.domid = domid;
> > -    args.nr_bufs = nr_bufs;
> > +    args.nr_bufs = array_index_nospec(nr_bufs, ARRAY_SIZE(args.buf) + 1);
> >
> >      for ( i = 0; i < args.nr_bufs; i++ )
> >      {
> > @@ -776,7 +781,7 @@ long do_dm_op(domid_t domid,
> >          return -E2BIG;
> >
> >      args.domid = domid;
> > -    args.nr_bufs = nr_bufs;
> > +    args.nr_bufs = array_index_nospec(nr_bufs, ARRAY_SIZE(args.buf) + 1);
> >
> >      if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
> >          return -EFAULT;
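
A note on the two nr_bufs clamps above: nr_bufs is a count rather than
an index, and the -E2BIG check just before each assignment has already
rejected nr_bufs > ARRAY_SIZE(args.buf), so the architecturally valid
range is 0 ... ARRAY_SIZE(args.buf) inclusive. That is why the bound
passed to array_index_nospec() is ARRAY_SIZE(args.buf) + 1; clamping
against ARRAY_SIZE(args.buf) itself would squash the legal maximum to 0
(array_index_nospec(N, N) yields 0), which is the off-by-1 the v2
changelog refers to. In sketch form, with made-up names:

    if ( count > ARRAY_SIZE(buf) )  /* architectural limit: count <= N */
        return -E2BIG;
    /* count == ARRAY_SIZE(buf) is legal, so bound the clamp at N + 1. */
    count = array_index_nospec(count, ARRAY_SIZE(buf) + 1);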
> > --- a/xen/arch/x86/hvm/hypercall.c
> > +++ b/xen/arch/x86/hvm/hypercall.c
> > @@ -20,6 +20,7 @@
> >   */
> >  #include <xen/lib.h>
> >  #include <xen/hypercall.h>
> > +#include <xen/nospec.h>
> >
> >  #include <asm/hvm/support.h>
> >
> > @@ -181,8 +182,15 @@ int hvm_hypercall(struct cpu_user_regs *
> >      BUILD_BUG_ON(ARRAY_SIZE(hvm_hypercall_table) >
> >                   ARRAY_SIZE(hypercall_args_table));
> >
> > -    if ( (eax >= ARRAY_SIZE(hvm_hypercall_table)) ||
> > -         !hvm_hypercall_table[eax].native )
> > +    if ( eax >= ARRAY_SIZE(hvm_hypercall_table) )
> > +    {
> > +        regs->rax = -ENOSYS;
> > +        return HVM_HCALL_completed;
> > +    }
> > +
> > +    eax = array_index_nospec(eax, ARRAY_SIZE(hvm_hypercall_table));
> > +
> > +    if ( !hvm_hypercall_table[eax].native )
> >      {
> >          regs->rax = -ENOSYS;
> >          return HVM_HCALL_completed;
> > --- a/xen/arch/x86/mm/mem_access.c
> > +++ b/xen/arch/x86/mm/mem_access.c
> > @@ -23,6 +23,7 @@
> >
> >  #include <xen/guest_access.h> /* copy_from_guest() */
> >  #include <xen/mem_access.h>
> > +#include <xen/nospec.h>
> >  #include <xen/vm_event.h>
> >  #include <xen/event.h>
> >  #include <public/vm_event.h>
> > @@ -334,6 +335,7 @@ static bool xenmem_access_to_p2m_access(
> >      switch ( xaccess )
> >      {
> >      case 0 ... ARRAY_SIZE(memaccess) - 1:
> > +        xaccess = array_index_nospec(xaccess, ARRAY_SIZE(memaccess));
> >          *paccess = memaccess[xaccess];
> >          break;
> >      case XENMEM_access_default:
> > --- a/xen/arch/x86/pv/hypercall.c
> > +++ b/xen/arch/x86/pv/hypercall.c
> > @@ -21,6 +21,7 @@
> >
> >  #include <xen/compiler.h>
> >  #include <xen/hypercall.h>
> > +#include <xen/nospec.h>
> >  #include <xen/trace.h>
> >
> >  #define HYPERCALL(x)                                                \
> > @@ -99,8 +100,15 @@ void pv_hypercall(struct cpu_user_regs *
> >      BUILD_BUG_ON(ARRAY_SIZE(pv_hypercall_table) >
> >                   ARRAY_SIZE(hypercall_args_table));
> >
> > -    if ( (eax >= ARRAY_SIZE(pv_hypercall_table)) ||
> > -         !pv_hypercall_table[eax].native )
> > +    if ( eax >= ARRAY_SIZE(pv_hypercall_table) )
> > +    {
> > +        regs->rax = -ENOSYS;
> > +        return;
> > +    }
> > +
> > +    eax = array_index_nospec(eax, ARRAY_SIZE(pv_hypercall_table));
> > +
> > +    if ( !pv_hypercall_table[eax].native )
> >      {
> >          regs->rax = -ENOSYS;
> >          return;

