
Re: [Xen-devel] [PATCH 05/10] xen: use SMP barrier in common code dealing with shared memory protocols



On 28/06/2013 17:10, "Ian Campbell" <ian.campbell@xxxxxxxxxx> wrote:

> Xen currently makes no strong distinction between the SMP barriers (smp_mb
> etc) and the regular barriers (mb etc). In Linux, from which we inherited
> these names by importing Linux code that uses them, the SMP barriers are
> intended to be sufficient for implementing shared-memory protocols between
> processors in an SMP system, while the standard barriers are also useful
> for MMIO etc.
> 
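As an aside (illustration only, not code from this patch, and with made-up
names): the kind of CPU-to-CPU protocol the SMP barriers are meant to cover
is the usual publish/consume pattern over shared memory:

    struct shared {
        int data;
        int ready;
    };

    /* Producer CPU. */
    static void produce(struct shared *s, int v)
    {
        s->data = v;
        smp_wmb();      /* make data visible before setting the flag ... */
        s->ready = 1;
    }

    /* Consumer CPU. */
    static int consume(const struct shared *s)
    {
        if ( !s->ready )
            return -1;
        smp_rmb();      /* ... and read the flag before reading the data */
        return s->data;
    }

No device or MMIO access is involved, so ordering only has to be enforced
between the processors taking part.
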
> On x86, with its stronger ordering model, there is not much practical
> difference here, but ARM has weaker barriers available which are suitable
> for use as SMP barriers.
> 
> Therefore ensure that common code uses the SMP barriers when that is all
> that is required.
> 
> On both ARM and x86 both types of barrier are currently identical so there is
> no actual change. A future patch will change smp_mb to a weaker barrier on
> ARM.
> 
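For reference, and only guessing at the shape of that follow-up rather than
quoting it, the ARMv7 definitions would presumably end up looking something
like:

    #define mb()       asm volatile ( "dsb" : : : "memory" )
    #define smp_mb()   asm volatile ( "dmb ish" : : : "memory" )
    #define smp_rmb()  asm volatile ( "dmb ish" : : : "memory" )
    #define smp_wmb()  asm volatile ( "dmb ishst" : : : "memory" )

dsb waits for outstanding accesses to actually complete (which MMIO and
similar uses may need), whereas dmb ish only orders accesses as observed by
other CPUs in the inner-shareable domain, which is all a CPU-to-CPU protocol
requires.
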
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> Cc: jbeulich@xxxxxxxx
> Cc: keir@xxxxxxx
> ---

Acked-by: Keir Fraser <keir@xxxxxxx>

> I'm not convinced some of these mb() couldn't actually be wmb(), but I didn't
> think too hard about this or make any changes along those lines.

Would logically be a separate patch anyway.
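
For the record, and purely as an illustration with hypothetical names: the
candidates are the pure store->store cases, where a write barrier would do,
as opposed to something like the event.h wait macros, which order a store
against a later load and so genuinely need the full barrier:

    struct chan {
        int payload;
        int valid;
        int blocked;
        int cond;
    };

    /* store -> store: publish payload before the flag; smp_wmb() suffices. */
    static void chan_publish(struct chan *c, int v)
    {
        c->payload = v;
        smp_wmb();
        c->valid = 1;
    }

    /* store -> load: mark ourselves blocked, then re-check the condition.
     * A write barrier gives no ordering of the store against the later
     * load, so the full smp_mb() has to stay. */
    static int chan_should_block(struct chan *c)
    {
        c->blocked = 1;
        smp_mb();
        return !c->cond;
    }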

 -- Keir

> ---
>  xen/common/domain.c        |    2 +-
>  xen/common/domctl.c        |    2 +-
>  xen/common/grant_table.c   |    4 ++--
>  xen/common/page_alloc.c    |    2 +-
>  xen/common/smp.c           |    4 ++--
>  xen/common/spinlock.c      |    6 +++---
>  xen/common/tmem_xen.c      |   10 +++++-----
>  xen/common/trace.c         |    8 ++++----
>  xen/drivers/char/console.c |    2 +-
>  xen/include/xen/event.h    |    4 ++--
>  xen/xsm/flask/ss/sidtab.c  |    4 ++--
>  11 files changed, 24 insertions(+), 24 deletions(-)
> 
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index fac3470..1380ea9 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -932,7 +932,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
>      v->vcpu_info_mfn = page_to_mfn(page);
>  
>      /* Set new vcpu_info pointer /before/ setting pending flags. */
> -    wmb();
> +    smp_wmb();
>  
>      /*
>       * Mark everything as being pending just to make sure nothing gets
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> index 9bd8f80..c653efb 100644
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -533,7 +533,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>  
>              /* Install vcpu array /then/ update max_vcpus. */
>              d->vcpu = vcpus;
> -            wmb();
> +            smp_wmb();
>              d->max_vcpus = max;
>          }
>  
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index 3f97328..eb50288 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -426,7 +426,7 @@ static int _set_status_v2(domid_t  domid,
>  
>      /* Make sure guest sees status update before checking if flags are
>         still valid */
> -    mb();
> +    smp_mb();
>  
>      scombo.word = *(u32 *)shah;
>      barrier();
> @@ -1670,7 +1670,7 @@ gnttab_transfer(
>              guest_physmap_add_page(e, sha->full_page.frame, mfn, 0);
>              sha->full_page.frame = mfn;
>          }
> -        wmb();
> +        smp_wmb();
>          shared_entry_header(e->grant_table, gop.ref)->flags |=
>              GTF_transfer_completed;
>  
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index 2162ef1..25a7d3d 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -1472,7 +1472,7 @@ int assign_pages(
>          ASSERT(page_get_owner(&pg[i]) == NULL);
>          ASSERT((pg[i].count_info & ~(PGC_allocated | 1)) == 0);
>          page_set_owner(&pg[i], d);
> -        wmb(); /* Domain pointer must be visible before updating refcnt. */
> +        smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
>          pg[i].count_info = PGC_allocated | 1;
>          page_list_add_tail(&pg[i], &d->page_list);
>      }
> diff --git a/xen/common/smp.c b/xen/common/smp.c
> index b2b056b..482a203 100644
> --- a/xen/common/smp.c
> +++ b/xen/common/smp.c
> @@ -89,12 +89,12 @@ void smp_call_function_interrupt(void)
>      if ( call_data.wait )
>      {
>          (*func)(info);
> -        mb();
> +        smp_mb();
>          cpumask_clear_cpu(cpu, &call_data.selected);
>      }
>      else
>      {
> -        mb();
> +        smp_mb();
>          cpumask_clear_cpu(cpu, &call_data.selected);
>          (*func)(info);
>      }
> diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
> index bfb9670..575cc6d 100644
> --- a/xen/common/spinlock.c
> +++ b/xen/common/spinlock.c
> @@ -218,7 +218,7 @@ void _spin_barrier(spinlock_t *lock)
>      u64      loop = 0;
>  
>      check_barrier(&lock->debug);
> -    do { mb(); loop++;} while ( _raw_spin_is_locked(&lock->raw) );
> +    do { smp_mb(); loop++;} while ( _raw_spin_is_locked(&lock->raw) );
>      if ((loop > 1) && lock->profile)
>      {
>          lock->profile->time_block += NOW() - block;
> @@ -226,9 +226,9 @@ void _spin_barrier(spinlock_t *lock)
>      }
>  #else
>      check_barrier(&lock->debug);
> -    do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
> +    do { smp_mb(); } while ( _raw_spin_is_locked(&lock->raw) );
>  #endif
> -    mb();
> +    smp_mb();
>  }
>  
>  int _spin_trylock_recursive(spinlock_t *lock)
> diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
> index 736a8c3..54ec09f 100644
> --- a/xen/common/tmem_xen.c
> +++ b/xen/common/tmem_xen.c
> @@ -173,7 +173,7 @@ EXPORT int tmh_copy_from_client(pfp_t *pfp,
>              return -EFAULT;
>          }
>      }
> -    mb();
> +    smp_mb();
>      if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va )
>          tmh_copy_page(tmem_va, cli_va);
>      else if ( (tmem_offset+len <= PAGE_SIZE) &&
> @@ -216,7 +216,7 @@ EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn,
>          return 0;
>      else if ( copy_from_guest(scratch, clibuf, PAGE_SIZE) )
>          return -EFAULT;
> -    mb();
> +    smp_mb();
>      ret = lzo1x_1_compress(cli_va ?: scratch, PAGE_SIZE, dmem, out_len,
> wmem);
>      ASSERT(ret == LZO_E_OK);
>      *out_va = dmem;
> @@ -260,7 +260,7 @@ EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp,
>      unmap_domain_page(tmem_va);
>      if ( cli_va )
>          cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
> -    mb();
> +    smp_mb();
>      return rc;
>  }
>  
> @@ -289,7 +289,7 @@ EXPORT int tmh_decompress_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
>          cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
>      else if ( copy_to_guest(clibuf, scratch, PAGE_SIZE) )
>          return -EFAULT;
> -    mb();
> +    smp_mb();
>      return 1;
>  }
>  
> @@ -311,7 +311,7 @@ EXPORT int tmh_copy_tze_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
>      if ( len < PAGE_SIZE )
>          memset((char *)cli_va+len,0,PAGE_SIZE-len);
>      cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
> -    mb();
> +    smp_mb();
>      return 1;
>  }
>  
> diff --git a/xen/common/trace.c b/xen/common/trace.c
> index fd4ac48..63ea0b7 100644
> --- a/xen/common/trace.c
> +++ b/xen/common/trace.c
> @@ -255,7 +255,7 @@ static int alloc_trace_bufs(unsigned int pages)
>      opt_tbuf_size = pages;
>  
>      printk("xentrace: initialised\n");
> -    wmb(); /* above must be visible before tb_init_done flag set */
> +    smp_wmb(); /* above must be visible before tb_init_done flag set */
>      tb_init_done = 1;
>  
>      return 0;
> @@ -414,7 +414,7 @@ int tb_control(xen_sysctl_tbuf_op_t *tbc)
>          int i;
>  
>          tb_init_done = 0;
> -        wmb();
> +        smp_wmb();
>          /* Clear any lost-record info so we don't get phantom lost records next time we
>           * start tracing.  Grab the lock to make sure we're not racing anyone.  After this
>           * hypercall returns, no more records should be placed into the buffers. */
> @@ -607,7 +607,7 @@ static inline void __insert_record(struct t_buf *buf,
>          memcpy(next_page, (char *)rec + remaining, rec_size - remaining);
>      }
>  
> -    wmb();
> +    smp_wmb();
>  
>      next += rec_size;
>      if ( next >= 2*data_size )
> @@ -718,7 +718,7 @@ void __trace_var(u32 event, bool_t cycles, unsigned int extra,
>          return;
>  
>      /* Read tb_init_done /before/ t_bufs. */
> -    rmb();
> +    smp_rmb();
>  
>      spin_lock_irqsave(&this_cpu(t_lock), flags);
>  
> diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
> index 7cd7bf6..b696b3e 100644
> --- a/xen/drivers/char/console.c
> +++ b/xen/drivers/char/console.c
> @@ -648,7 +648,7 @@ void __init console_init_postirq(void)
>      for ( i = conringc ; i != conringp; i++ )
>          ring[i & (opt_conring_size - 1)] = conring[i & (conring_size - 1)];
>      conring = ring;
> -    wmb(); /* Allow users of console_force_unlock() to see larger buffer. */
> +    smp_wmb(); /* Allow users of console_force_unlock() to see larger buffer. */
>      conring_size = opt_conring_size;
>      spin_unlock_irq(&console_lock);
>  
> diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
> index 4ac39ad..6f60162 100644
> --- a/xen/include/xen/event.h
> +++ b/xen/include/xen/event.h
> @@ -85,7 +85,7 @@ void notify_via_xen_event_channel(struct domain *ld, int lport);
>          if ( condition )                                                \
>              break;                                                      \
>          set_bit(_VPF_blocked_in_xen, &current->pause_flags);            \
> -        mb(); /* set blocked status /then/ re-evaluate condition */     \
> +        smp_mb(); /* set blocked status /then/ re-evaluate condition */ \
>          if ( condition )                                                \
>          {                                                               \
>              clear_bit(_VPF_blocked_in_xen, &current->pause_flags);      \
> @@ -99,7 +99,7 @@ void notify_via_xen_event_channel(struct domain *ld, int lport);
>      do {                                                                \
>          set_bit(_VPF_blocked_in_xen, &current->pause_flags);            \
>          raise_softirq(SCHEDULE_SOFTIRQ);                                \
> -        mb(); /* set blocked status /then/ caller does his work */      \
> +        smp_mb(); /* set blocked status /then/ caller does his work */  \
>      } while ( 0 )
>  
>  #endif /* __XEN_EVENT_H__ */
> diff --git a/xen/xsm/flask/ss/sidtab.c b/xen/xsm/flask/ss/sidtab.c
> index 586033c..cd1360c 100644
> --- a/xen/xsm/flask/ss/sidtab.c
> +++ b/xen/xsm/flask/ss/sidtab.c
> @@ -79,13 +79,13 @@ int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
>      if ( prev )
>      {
>          newnode->next = prev->next;
> -        wmb();
> +        smp_wmb();
>          prev->next = newnode;
>      }
>      else
>      {
>          newnode->next = s->htable[hvalue];
> -        wmb();
> +        smp_wmb();
>          s->htable[hvalue] = newnode;
>      }
>  


