@@ -47,7 +47,7 @@ void idle_loop(void)
local_irq_disable();
if ( cpu_is_haltable(smp_processor_id()) )
{
- dsb();
+ dsb(sy);
wfi();
}
local_irq_enable();
@@ -137,7 +137,7 @@ static void gic_irq_enable(struct irq_desc *desc)
spin_lock_irqsave(&desc->lock, flags);
spin_lock(&gic.lock);
desc->status &= ~IRQ_DISABLED;
- dsb();
+ dsb(sy);
/* Enable routing */
GICD[GICD_ISENABLER + irq / 32] = (1u << (irq % 32));
spin_unlock(&gic.lock);
@@ -478,7 +478,7 @@ void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
cpumask_and(&online_mask, cpumask, &cpu_online_map);
mask = gic_cpu_mask(&online_mask);
- dsb();
+ dsb(sy);
GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST
| (mask<<GICD_SGI_TARGET_SHIFT)
@@ -495,7 +495,7 @@ void send_SGI_self(enum gic_sgi sgi)
{
ASSERT(sgi < 16); /* There are only 16 SGIs */
- dsb();
+ dsb(sy);
GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF
| sgi;
@@ -505,7 +505,7 @@ void send_SGI_allbutself(enum gic_sgi sgi)
{
ASSERT(sgi < 16); /* There are only 16 SGIs */
- dsb();
+ dsb(sy);
GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS
| sgi;
@@ -589,7 +589,7 @@ static int __setup_irq(struct irq_desc *desc, unsigned int irq,
return -EBUSY;
desc->action = new;
- dsb();
+ dsb(sy);
return 0;
}
@@ -345,10 +345,10 @@ void flush_page_to_ram(unsigned long mfn)
{
void *p, *v = map_domain_page(mfn);
- dsb(); /* So the CPU issues all writes to the range */
+ dsb(sy); /* So the CPU issues all writes to the range */
for ( p = v; p < v + PAGE_SIZE ; p += cacheline_bytes )
asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p));
- dsb(); /* So we know the flushes happen before continuing */
+ dsb(sy); /* So we know the flushes happen before continuing */
unmap_domain_page(v);
}
@@ -48,7 +48,7 @@ static inline int vexpress_ctrl_start(uint32_t *syscfg, int write,
/* wait for complete flag to be set */
do {
stat = syscfg[V2M_SYS_CFGSTAT/4];
- dsb();
+ dsb(sy);
} while ( !(stat & V2M_SYS_CFG_COMPLETE) );
/* check error status and return error flag if set */
@@ -113,10 +113,10 @@ static void vexpress_reset(void)
/* switch to slow mode */
writel(0x3, sp810);
- dsb(); isb();
+ dsb(sy); isb();
/* writing any value to SCSYSSTAT reg will reset the system */
writel(0x1, sp810 + 4);
- dsb(); isb();
+ dsb(sy); isb();
iounmap(sp810);
}
@@ -341,7 +341,7 @@ void stop_cpu(void)
local_irq_disable();
cpu_is_dead = 1;
/* Make sure the write happens before we sleep forever */
- dsb();
+ dsb(sy);
isb();
while ( 1 )
wfi();
@@ -260,7 +260,7 @@ void udelay(unsigned long usecs)
s_time_t deadline = get_s_time() + 1000 * (s_time_t) usecs;
while ( get_s_time() - deadline < 0 )
;
- dsb();
+ dsb(sy);
isb();
}
@@ -78,7 +78,7 @@ void (*video_puts)(const char *) = vga_noop_puts;
static void hdlcd_flush(void)
{
- dsb();
+ dsb(sy);
}
static int __init get_color_masks(const char* bpp, struct color_masks **masks)
@@ -4,44 +4,44 @@
/* Flush local TLBs, current VMID only */
static inline void flush_tlb_local(void)
{
- dsb();
+ dsb(sy);
WRITE_CP32((uint32_t) 0, TLBIALL);
- dsb();
+ dsb(sy);
isb();
}
/* Flush inner shareable TLBs, current VMID only */
static inline void flush_tlb(void)
{
- dsb();
+ dsb(sy);
WRITE_CP32((uint32_t) 0, TLBIALLIS);
- dsb();
+ dsb(sy);
isb();
}
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
static inline void flush_tlb_all_local(void)
{
- dsb();
+ dsb(sy);
WRITE_CP32((uint32_t) 0, TLBIALLNSNH);
- dsb();
+ dsb(sy);
isb();
}
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
static inline void flush_tlb_all(void)
{
- dsb();
+ dsb(sy);
WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
- dsb();
+ dsb(sy);
isb();
}
@@ -67,13 +67,13 @@ static inline void flush_xen_data_tlb(void)
static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
{
unsigned long end = va + size;
- dsb(); /* Ensure preceding are visible */
+ dsb(sy); /* Ensure preceding are visible */
while ( va < end ) {
asm volatile(STORE_CP32(0, TLBIMVAH)
: : "r" (va) : "memory");
va += PAGE_SIZE;
}
- dsb(); /* Ensure completion of the TLB flush */
+ dsb(sy); /* Ensure completion of the TLB flush */
isb();
}
@@ -60,13 +60,13 @@ static inline void flush_xen_data_tlb(void)
static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
{
unsigned long end = va + size;
- dsb(); /* Ensure preceding are visible */
+ dsb(sy); /* Ensure preceding are visible */
while ( va < end ) {
asm volatile("tlbi vae2, %0;"
: : "r" (va>>PAGE_SHIFT) : "memory");
va += PAGE_SIZE;
}
- dsb(); /* Ensure completion of the TLB flush */
+ dsb(sy); /* Ensure completion of the TLB flush */
isb();
}
@@ -263,10 +263,10 @@ extern size_t cacheline_bytes;
static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
{
void *end;
- dsb(); /* So the CPU issues all writes to the range */
+ dsb(sy); /* So the CPU issues all writes to the range */
for ( end = p + size; p < end; p += cacheline_bytes )
asm volatile (__clean_xen_dcache_one(0) : : "r" (p));
- dsb(); /* So we know the flushes happen before continuing */
+ dsb(sy); /* So we know the flushes happen before continuing */
}
/* Macro for flushing a single small item. The predicate is always
@@ -13,16 +13,16 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
-#define dmb() asm volatile("dmb sy" : : : "memory")
+#define dsb(scope) asm volatile("dsb " #scope : : : "memory")
+#define dmb(scope) asm volatile("dmb " #scope : : : "memory")
-#define mb() dsb()
-#define rmb() dsb()
-#define wmb() dsb()
+#define mb() dsb(sy)
+#define rmb() dsb(sy)
+#define wmb() dsb(sy)
-#define smp_mb() dmb()
-#define smp_rmb() dmb()
-#define smp_wmb() dmb()
+#define smp_mb() dmb(sy)
+#define smp_rmb() dmb(sy)
+#define smp_wmb() dmb(sy)
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
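
For reference, a minimal standalone sketch (not part of the patch) of what the reworked barrier macros do: the scope argument is stringized into the instruction, so dsb(sy) emits "dsb sy", and a narrower scope could be passed the same way by future callers. The publish_flag() helper below is hypothetical, shown only to mirror the store-then-dsb(sy)-then-isb() pattern seen in hunks such as stop_cpu() and vexpress_reset() above.

#define isb()        asm volatile("isb" : : : "memory")
/* dsb(sy) expands to asm volatile("dsb " "sy" : : : "memory"), i.e. "dsb sy" */
#define dsb(scope)   asm volatile("dsb " #scope : : : "memory")
#define dmb(scope)   asm volatile("dmb " #scope : : : "memory")

/* Hypothetical caller: make a store visible before continuing. */
static inline void publish_flag(volatile int *flag)
{
    *flag = 1;   /* the write we want other observers to see */
    dsb(sy);     /* full-system data synchronization barrier */
    isb();       /* resynchronize the instruction stream afterwards */
}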