[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 08/10] xen: arm: add scope to dsb and dmb macros



On Mon, 1 Jul 2013, Stefano Stabellini wrote:
> On Fri, 28 Jun 2013, Ian Campbell wrote:
> > Everywhere currently passes "sy"stem, so no actual change.
> > 
> > Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

Sorry, I meant Acked-by:

> 
> >  xen/arch/arm/domain.c                |    2 +-
> >  xen/arch/arm/gic.c                   |    8 ++++----
> >  xen/arch/arm/mm.c                    |    2 +-
> >  xen/arch/arm/platforms/vexpress.c    |    6 +++---
> >  xen/arch/arm/smpboot.c               |    2 +-
> >  xen/arch/arm/time.c                  |    2 +-
> >  xen/drivers/video/arm_hdlcd.c        |    2 +-
> >  xen/include/asm-arm/arm32/flushtlb.h |    8 ++++----
> >  xen/include/asm-arm/arm32/io.h       |    4 ++--
> >  xen/include/asm-arm/arm32/page.h     |    4 ++--
> >  xen/include/asm-arm/arm64/io.h       |    4 ++--
> >  xen/include/asm-arm/arm64/page.h     |    4 ++--
> >  xen/include/asm-arm/page.h           |    4 ++--
> >  xen/include/asm-arm/system.h         |   16 ++++++++--------
> >  14 files changed, 34 insertions(+), 34 deletions(-)
> > 
> > diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> > index 4c434a1..0bd8f6b 100644
> > --- a/xen/arch/arm/domain.c
> > +++ b/xen/arch/arm/domain.c
> > @@ -44,7 +44,7 @@ void idle_loop(void)
> >          local_irq_disable();
> >          if ( cpu_is_haltable(smp_processor_id()) )
> >          {
> > -            dsb();
> > +            dsb("sy");
> >              wfi();
> >          }
> >          local_irq_enable();
> > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> > index 177560e..42095ee 100644
> > --- a/xen/arch/arm/gic.c
> > +++ b/xen/arch/arm/gic.c
> > @@ -432,7 +432,7 @@ void send_SGI_mask(const cpumask_t *cpumask, enum 
> > gic_sgi sgi)
> >  
> >      ASSERT(mask < 0x100); /* The target bitmap only supports 8 CPUs */
> >  
> > -    dsb();
> > +    dsb("sy");
> >  
> >      GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST
> >          | (mask<<GICD_SGI_TARGET_SHIFT)
> > @@ -449,7 +449,7 @@ void send_SGI_self(enum gic_sgi sgi)
> >  {
> >      ASSERT(sgi < 16); /* There are only 16 SGIs */
> >  
> > -    dsb();
> > +    dsb("sy");
> >  
> >      GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF
> >          | sgi;
> > @@ -459,7 +459,7 @@ void send_SGI_allbutself(enum gic_sgi sgi)
> >  {
> >     ASSERT(sgi < 16); /* There are only 16 SGIs */
> >  
> > -   dsb();
> > +   dsb("sy");
> >  
> >     GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS
> >         | sgi;
> > @@ -546,7 +546,7 @@ static int __setup_irq(struct irq_desc *desc, unsigned 
> > int irq,
> >  
> >      desc->action  = new;
> >      desc->status &= ~IRQ_DISABLED;
> > -    dsb();
> > +    dsb("sy");
> >  
> >      desc->handler->startup(desc);
> >  
> > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> > index 3f049cb..b287a9b 100644
> > --- a/xen/arch/arm/mm.c
> > +++ b/xen/arch/arm/mm.c
> > @@ -324,7 +324,7 @@ void __cpuinit setup_virt_paging(void)
> >  #define WRITE_TTBR(ttbr)                                                \
> >      flush_xen_text_tlb();                                               \
> >      WRITE_SYSREG64(ttbr, TTBR0_EL2);                                    \
> > -    dsb(); /* ensure memory accesses do not cross over the TTBR0 write */ \
> > +    dsb("sy"); /* ensure memory accesses do not cross over the TTBR0 write 
> > */ \
> >      /* flush_xen_text_tlb contains an initial isb which ensures the     \
> >       * write to TTBR0 has completed. */                                 \
> >      flush_xen_text_tlb()
> > diff --git a/xen/arch/arm/platforms/vexpress.c 
> > b/xen/arch/arm/platforms/vexpress.c
> > index 8fc30c4..6f6869e 100644
> > --- a/xen/arch/arm/platforms/vexpress.c
> > +++ b/xen/arch/arm/platforms/vexpress.c
> > @@ -46,7 +46,7 @@ static inline int vexpress_ctrl_start(uint32_t *syscfg, 
> > int write,
> >      /* wait for complete flag to be set */
> >      do {
> >          stat = syscfg[V2M_SYS_CFGSTAT/4];
> > -        dsb();
> > +        dsb("sy");
> >      } while ( !(stat & V2M_SYS_CFG_COMPLETE) );
> >  
> >      /* check error status and return error flag if set */
> > @@ -111,10 +111,10 @@ static void vexpress_reset(void)
> >  
> >      /* switch to slow mode */
> >      iowritel(sp810, 0x3);
> > -    dsb(); isb();
> > +    dsb("sy"); isb();
> >      /* writing any value to SCSYSSTAT reg will reset the system */
> >      iowritel(sp810 + 4, 0x1);
> > -    dsb(); isb();
> > +    dsb("sy"); isb();
> >  
> >      iounmap(sp810);
> >  }
> > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> > index 727e09f..b88355f 100644
> > --- a/xen/arch/arm/smpboot.c
> > +++ b/xen/arch/arm/smpboot.c
> > @@ -211,7 +211,7 @@ void stop_cpu(void)
> >      local_irq_disable();
> >      cpu_is_dead = 1;
> >      /* Make sure the write happens before we sleep forever */
> > -    dsb();
> > +    dsb("sy");
> >      isb();
> >      while ( 1 )
> >          wfi();
> > diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c
> > index 4ed7882..2c254f4 100644
> > --- a/xen/arch/arm/time.c
> > +++ b/xen/arch/arm/time.c
> > @@ -252,7 +252,7 @@ void udelay(unsigned long usecs)
> >      s_time_t deadline = get_s_time() + 1000 * (s_time_t) usecs;
> >      while ( get_s_time() - deadline < 0 )
> >          ;
> > -    dsb();
> > +    dsb("sy");
> >      isb();
> >  }
> >  
> > diff --git a/xen/drivers/video/arm_hdlcd.c b/xen/drivers/video/arm_hdlcd.c
> > index 72979ea..5c09b0e 100644
> > --- a/xen/drivers/video/arm_hdlcd.c
> > +++ b/xen/drivers/video/arm_hdlcd.c
> > @@ -77,7 +77,7 @@ void (*video_puts)(const char *) = vga_noop_puts;
> >  
> >  static void hdlcd_flush(void)
> >  {
> > -    dsb();
> > +    dsb("sy");
> >  }
> >  
> >  static int __init get_color_masks(const char* bpp, struct color_masks 
> > **masks)
> > diff --git a/xen/include/asm-arm/arm32/flushtlb.h 
> > b/xen/include/asm-arm/arm32/flushtlb.h
> > index 14e8827..2776375 100644
> > --- a/xen/include/asm-arm/arm32/flushtlb.h
> > +++ b/xen/include/asm-arm/arm32/flushtlb.h
> > @@ -4,22 +4,22 @@
> >  /* Flush local TLBs, current VMID only */
> >  static inline void flush_tlb_local(void)
> >  {
> > -    dsb();
> > +    dsb("sy");
> >  
> >      WRITE_CP32((uint32_t) 0, TLBIALLIS);
> >  
> > -    dsb();
> > +    dsb("sy");
> >      isb();
> >  }
> >  
> >  /* Flush local TLBs, all VMIDs, non-hypervisor mode */
> >  static inline void flush_tlb_all_local(void)
> >  {
> > -    dsb();
> > +    dsb("sy");
> >  
> >      WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
> >  
> > -    dsb();
> > +    dsb("sy");
> >      isb();
> >  }
> >  
> > diff --git a/xen/include/asm-arm/arm32/io.h b/xen/include/asm-arm/arm32/io.h
> > index ec7e0ff..cb0bc96 100644
> > --- a/xen/include/asm-arm/arm32/io.h
> > +++ b/xen/include/asm-arm/arm32/io.h
> > @@ -30,14 +30,14 @@ static inline uint32_t ioreadl(const volatile void 
> > __iomem *addr)
> >      asm volatile("ldr %1, %0"
> >                   : "+Qo" (*(volatile uint32_t __force *)addr),
> >                     "=r" (val));
> > -    dsb();
> > +    dsb("sy");
> >  
> >      return val;
> >  }
> >  
> >  static inline void iowritel(const volatile void __iomem *addr, uint32_t 
> > val)
> >  {
> > -    dsb();
> > +    dsb("sy");
> >      asm volatile("str %1, %0"
> >                   : "+Qo" (*(volatile uint32_t __force *)addr)
> >                   : "r" (val));
> > diff --git a/xen/include/asm-arm/arm32/page.h 
> > b/xen/include/asm-arm/arm32/page.h
> > index e573502..f8dfbd3 100644
> > --- a/xen/include/asm-arm/arm32/page.h
> > +++ b/xen/include/asm-arm/arm32/page.h
> > @@ -67,13 +67,13 @@ static inline void flush_xen_data_tlb(void)
> >  static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned 
> > long size)
> >  {
> >      unsigned long end = va + size;
> > -    dsb(); /* Ensure preceding are visible */
> > +    dsb("sy"); /* Ensure preceding are visible */
> >      while ( va < end ) {
> >          asm volatile(STORE_CP32(0, TLBIMVAHIS)
> >                       : : "r" (va) : "memory");
> >          va += PAGE_SIZE;
> >      }
> > -    dsb(); /* Ensure completion of the TLB flush */
> > +    dsb("sy"); /* Ensure completion of the TLB flush */
> >      isb();
> >  }
> >  
> > diff --git a/xen/include/asm-arm/arm64/io.h b/xen/include/asm-arm/arm64/io.h
> > index ec041cd..0a100ad 100644
> > --- a/xen/include/asm-arm/arm64/io.h
> > +++ b/xen/include/asm-arm/arm64/io.h
> > @@ -24,14 +24,14 @@ static inline uint32_t ioreadl(const volatile void 
> > __iomem *addr)
> >      uint32_t val;
> >  
> >      asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
> > -    dsb();
> > +    dsb("sy");
> >  
> >      return val;
> >  }
> >  
> >  static inline void iowritel(const volatile void __iomem *addr, uint32_t 
> > val)
> >  {
> > -    dsb();
> > +    dsb("sy");
> >      asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
> >  }
> >  
> > diff --git a/xen/include/asm-arm/arm64/page.h 
> > b/xen/include/asm-arm/arm64/page.h
> > index 28748d3..aca1590 100644
> > --- a/xen/include/asm-arm/arm64/page.h
> > +++ b/xen/include/asm-arm/arm64/page.h
> > @@ -60,13 +60,13 @@ static inline void flush_xen_data_tlb(void)
> >  static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned 
> > long size)
> >  {
> >      unsigned long end = va + size;
> > -    dsb(); /* Ensure preceding are visible */
> > +    dsb("sy"); /* Ensure preceding are visible */
> >      while ( va < end ) {
> >          asm volatile("tlbi vae2is, %0;"
> >                       : : "r" (va>>PAGE_SHIFT) : "memory");
> >          va += PAGE_SIZE;
> >      }
> > -    dsb(); /* Ensure completion of the TLB flush */
> > +    dsb("sy"); /* Ensure completion of the TLB flush */
> >      isb();
> >  }
> >  
> > diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
> > index cd38956..eb007ac 100644
> > --- a/xen/include/asm-arm/page.h
> > +++ b/xen/include/asm-arm/page.h
> > @@ -284,10 +284,10 @@ extern size_t cacheline_bytes;
> >  static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
> >  {
> >      void *end;
> > -    dsb();           /* So the CPU issues all writes to the range */
> > +    dsb("sy");           /* So the CPU issues all writes to the range */
> >      for ( end = p + size; p < end; p += cacheline_bytes )
> >          asm volatile (__flush_xen_dcache_one(0) : : "r" (p));
> > -    dsb();           /* So we know the flushes happen before continuing */
> > +    dsb("sy");           /* So we know the flushes happen before 
> > continuing */
> >  }
> >  
> >  /* Macro for flushing a single small item.  The predicate is always
> > diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
> > index 89c61ef..68efba9 100644
> > --- a/xen/include/asm-arm/system.h
> > +++ b/xen/include/asm-arm/system.h
> > @@ -13,16 +13,16 @@
> >  #define wfi()           asm volatile("wfi" : : : "memory")
> >  
> >  #define isb()           asm volatile("isb" : : : "memory")
> > -#define dsb()           asm volatile("dsb sy" : : : "memory")
> > -#define dmb()           asm volatile("dmb sy" : : : "memory")
> > +#define dsb(scope)      asm volatile("dsb " scope : : : "memory")
> > +#define dmb(scope)      asm volatile("dmb " scope : : : "memory")
> >  
> > -#define mb()            dsb()
> > -#define rmb()           dsb()
> > -#define wmb()           dsb()
> > +#define mb()            dsb("sy")
> > +#define rmb()           dsb("sy")
> > +#define wmb()           dsb("sy")
> >  
> > -#define smp_mb()        dmb()
> > -#define smp_rmb()       dmb()
> > -#define smp_wmb()       dmb()
> > +#define smp_mb()        dmb("sy")
> > +#define smp_rmb()       dmb("sy")
> > +#define smp_wmb()       dmb("sy")
> >  
> >  #define xchg(ptr,x) \
> >          ((__typeof__(*(ptr)))__xchg((unsigned 
> > long)(x),(ptr),sizeof(*(ptr))))
> > -- 
> > 1.7.2.5
> > 
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.