| Message ID | 1396515585-5737-6-git-send-email-ian.campbell@citrix.com |
|---|---|
| State | New |
| Headers | show |
At 09:59 +0100 on 03 Apr (1396515585), Ian Campbell wrote: > We only need an inner shareable barrier here. Following the logic we had earlier, do the earlier dsbs here only need to be store barriers? Tim. > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > --- > v4: new patch > --- > xen/include/asm-arm/page.h | 16 ++++++++-------- > 1 file changed, 8 insertions(+), 8 deletions(-) > > diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h > index a96e40b..b4d5597 100644 > --- a/xen/include/asm-arm/page.h > +++ b/xen/include/asm-arm/page.h > @@ -263,20 +263,20 @@ extern size_t cacheline_bytes; > static inline void clean_xen_dcache_va_range(void *p, unsigned long size) > { > void *end; > - dsb(sy); /* So the CPU issues all writes to the range */ > + dsb(ish); /* So the CPU issues all writes to the range */ > for ( end = p + size; p < end; p += cacheline_bytes ) > asm volatile (__clean_xen_dcache_one(0) : : "r" (p)); > - dsb(sy); /* So we know the flushes happen before continuing */ > + dsb(ish); /* So we know the flushes happen before continuing */ > } > > static inline void clean_and_invalidate_xen_dcache_va_range > (void *p, unsigned long size) > { > void *end; > - dsb(sy); /* So the CPU issues all writes to the range */ > + dsb(ish); /* So the CPU issues all writes to the range */ > for ( end = p + size; p < end; p += cacheline_bytes ) > asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p)); > - dsb(sy); /* So we know the flushes happen before continuing */ > + dsb(ish); /* So we know the flushes happen before continuing */ > } > > /* Macros for flushing a single small item. 
The predicate is always > @@ -288,9 +288,9 @@ static inline void clean_and_invalidate_xen_dcache_va_range > clean_xen_dcache_va_range(_p, sizeof(x)); \ > else \ > asm volatile ( \ > - "dsb sy;" /* Finish all earlier writes */ \ > + "dsb ish;" /* Finish all earlier writes */ \ > __clean_xen_dcache_one(0) \ > - "dsb sy;" /* Finish flush before continuing */ \ > + "dsb ish;" /* Finish flush before continuing */ \ > : : "r" (_p), "m" (*_p)); \ > } while (0) > > @@ -300,9 +300,9 @@ static inline void clean_and_invalidate_xen_dcache_va_range > clean_and_invalidate_xen_dcache_va_range(_p, sizeof(x)); \ > else \ > asm volatile ( \ > - "dsb sy;" /* Finish all earlier writes */ \ > + "dsb ish;" /* Finish all earlier writes */ \ > __clean_and_invalidate_xen_dcache_one(0) \ > - "dsb sy;" /* Finish flush before continuing */ \ > + "dsb ish;" /* Finish flush before continuing */ \ > : : "r" (_p), "m" (*_p)); \ > } while (0) > > -- > 1.7.10.4 > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel
On Thu, 2014-04-03 at 14:55 +0200, Tim Deegan wrote: > At 09:59 +0100 on 03 Apr (1396515585), Ian Campbell wrote: > > We only need an inner shareable barrier here. > > Following the logic we had earlier, do the earlier dsbs here only need > to be store barriers? I think so, and I'm considering doing that in a followup patch. Ian.
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index a96e40b..b4d5597 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -263,20 +263,20 @@ extern size_t cacheline_bytes; static inline void clean_xen_dcache_va_range(void *p, unsigned long size) { void *end; - dsb(sy); /* So the CPU issues all writes to the range */ + dsb(ish); /* So the CPU issues all writes to the range */ for ( end = p + size; p < end; p += cacheline_bytes ) asm volatile (__clean_xen_dcache_one(0) : : "r" (p)); - dsb(sy); /* So we know the flushes happen before continuing */ + dsb(ish); /* So we know the flushes happen before continuing */ } static inline void clean_and_invalidate_xen_dcache_va_range (void *p, unsigned long size) { void *end; - dsb(sy); /* So the CPU issues all writes to the range */ + dsb(ish); /* So the CPU issues all writes to the range */ for ( end = p + size; p < end; p += cacheline_bytes ) asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p)); - dsb(sy); /* So we know the flushes happen before continuing */ + dsb(ish); /* So we know the flushes happen before continuing */ } /* Macros for flushing a single small item. 
The predicate is always @@ -288,9 +288,9 @@ static inline void clean_and_invalidate_xen_dcache_va_range clean_xen_dcache_va_range(_p, sizeof(x)); \ else \ asm volatile ( \ - "dsb sy;" /* Finish all earlier writes */ \ + "dsb ish;" /* Finish all earlier writes */ \ __clean_xen_dcache_one(0) \ - "dsb sy;" /* Finish flush before continuing */ \ + "dsb ish;" /* Finish flush before continuing */ \ : : "r" (_p), "m" (*_p)); \ } while (0) @@ -300,9 +300,9 @@ static inline void clean_and_invalidate_xen_dcache_va_range clean_and_invalidate_xen_dcache_va_range(_p, sizeof(x)); \ else \ asm volatile ( \ - "dsb sy;" /* Finish all earlier writes */ \ + "dsb ish;" /* Finish all earlier writes */ \ __clean_and_invalidate_xen_dcache_one(0) \ - "dsb sy;" /* Finish flush before continuing */ \ + "dsb ish;" /* Finish flush before continuing */ \ : : "r" (_p), "m" (*_p)); \ } while (0)
We only need an inner shareable barrier here. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v4: new patch --- xen/include/asm-arm/page.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-)